cppgc: shared-cage: Use dedicated reservations for normal/large pages

This CL splits the Oilpan giga-cage into two 2GB reservations: one for
normal pages and the other for large pages. The split enables fast
page-header lookup (assuming most objects reside on normal pages),
which is needed for:
1) the young-generation project, where the remembered set will move to
   pages;
2) the shared-cage project, to find HeapBase* from page headers.
A sketch of the lookup idea follows the commit metadata below.

Bug: v8:12231, chromium:1029379
Change-Id: I4ae9e8a75a307ed0dff9a2ec4f1247b80e17ebd9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3688519
Auto-Submit: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80950}
Author: Anton Bikineev <bikineev@chromium.org>, 2022-06-05 15:30:41 +02:00 (committed by V8 LUCI CQ)
Parent: ff64360d0a
Commit: d8da11ae05
9 changed files with 126 additions and 27 deletions
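The fast path relies on the layout alone: any pointer whose offset from the cage base falls within the first 2GB is on a normal page, and normal pages are size-aligned, so the page header is reached by masking the low bits of the pointer. The following is a minimal, self-contained sketch of that idea; the constants and names (kCageSize, kNormalHalf, kPageSize, PageHeader, LookupNormalPageHeader) are illustrative stand-ins, not the actual cppgc API.

#include <cstdint>

namespace sketch {

// Illustrative constants: a 4GB cage split into a 2GB normal-page half and a
// 2GB large-page half, with size-aligned normal pages.
constexpr std::uintptr_t kCageSize = std::uintptr_t{4} << 30;
constexpr std::uintptr_t kNormalHalf = kCageSize / 2;
constexpr std::uintptr_t kPageSize = std::uintptr_t{1} << 17;

struct PageHeader {};  // stands in for the page-header type (e.g. NormalPage)

// Returns the page header for an inner pointer in the normal-page half, or
// nullptr if the pointer belongs to the large-page half (slow-path lookup).
inline PageHeader* LookupNormalPageHeader(std::uintptr_t cage_base,
                                          std::uintptr_t inner_ptr) {
  const std::uintptr_t offset = inner_ptr - cage_base;
  if (offset >= kNormalHalf) return nullptr;  // large page: needs slow path
  // Normal pages are aligned to kPageSize, so masking the low bits of the
  // pointer yields the start of the page, where the header lives.
  return reinterpret_cast<PageHeader*>(inner_ptr & ~(kPageSize - 1));
}

}  // namespace sketch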


@ -37,6 +37,9 @@ static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
#if defined(CPPGC_CAGED_HEAP)
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
constexpr size_t kCagedHeapNormalPageReservationSize =
kCagedHeapReservationSize / 2;
#endif
static constexpr size_t kDefaultAlignment = sizeof(void*);


@ -16,6 +16,7 @@
#include "src/base/platform/platform.h"
#include "src/heap/cppgc/caged-heap.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/member.h"
namespace cppgc {
@ -25,6 +26,8 @@ static_assert(api_constants::kCagedHeapReservationSize ==
kCagedHeapReservationSize);
static_assert(api_constants::kCagedHeapReservationAlignment ==
kCagedHeapReservationAlignment);
static_assert(api_constants::kCagedHeapNormalPageReservationSize ==
kCagedHeapNormalPageReservationSize);
namespace {
@ -78,9 +81,20 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
caged_heap_start -
reinterpret_cast<CagedAddress>(reserved_area_.address());
bounded_allocator_ = std::make_unique<v8::base::BoundedPageAllocator>(
normal_page_bounded_allocator_ = std::make_unique<
v8::base::BoundedPageAllocator>(
&platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize,
kCagedHeapNormalPageReservationSize - local_data_size_with_padding,
kPageSize,
v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
v8::base::PageFreeingMode::kMakeInaccessible);
large_page_bounded_allocator_ = std::make_unique<
v8::base::BoundedPageAllocator>(
&platform_allocator,
reinterpret_cast<uintptr_t>(reserved_area_.address()) +
kCagedHeapNormalPageReservationSize,
kCagedHeapNormalPageReservationSize, kPageSize,
v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
v8::base::PageFreeingMode::kMakeInaccessible);
}
@ -99,5 +113,37 @@ void CagedHeap::EnableGenerationalGC() {
}
#endif // defined(CPPGC_YOUNG_GENERATION)
void CagedHeap::NotifyLargePageCreated(LargePage* page) {
DCHECK(page);
auto result = large_pages_.insert(page);
USE(result);
DCHECK(result.second);
}
void CagedHeap::NotifyLargePageDestroyed(LargePage* page) {
DCHECK(page);
auto size = large_pages_.erase(page);
USE(size);
DCHECK_EQ(1u, size);
}
BasePage* CagedHeap::LookupPageFromInnerPointer(void* ptr) const {
DCHECK(IsOnHeap(ptr));
if (V8_LIKELY(IsWithinNormalPageReservation(ptr))) {
return NormalPage::FromPayload(ptr);
} else {
return LookupLargePageFromInnerPointer(ptr);
}
}
LargePage* CagedHeap::LookupLargePageFromInnerPointer(void* ptr) const {
auto it = large_pages_.upper_bound(static_cast<LargePage*>(ptr));
DCHECK_NE(large_pages_.begin(), it);
auto* page = *std::next(it, -1);
DCHECK(page);
DCHECK(page->PayloadContains(static_cast<ConstAddress>(ptr)));
return page;
}
} // namespace internal
} // namespace cppgc
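For pointers in the large-page half there is no alignment to exploit, so the caged heap keeps the set of live large pages and resolves an inner pointer by ordering: upper_bound() yields the first page starting above the pointer, and its predecessor is the page that contains it. The snippet below shows the same pattern in isolation, simplified to raw start addresses with hypothetical names (large_page_starts, LookupLargePageStart); the real code above does this directly on std::set<LargePage*>.

#include <cassert>
#include <cstdint>
#include <iterator>
#include <set>

namespace sketch {

// Start addresses of live large pages, kept ordered; plays the role of
// CagedHeap::large_pages_ in the change above (simplified to addresses).
std::set<std::uintptr_t> large_page_starts;

std::uintptr_t LookupLargePageStart(std::uintptr_t inner_ptr) {
  // First page starting strictly above the pointer...
  auto it = large_page_starts.upper_bound(inner_ptr);
  assert(it != large_page_starts.begin());  // pointer must lie on some page
  // ...so the previous entry is the page that contains inner_ptr.
  return *std::prev(it);
}

}  // namespace sketch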


@ -7,6 +7,7 @@
#include <limits>
#include <memory>
#include <set>
#include "include/cppgc/platform.h"
#include "src/base/bounded-page-allocator.h"
@ -18,6 +19,8 @@ namespace internal {
struct CagedHeapLocalData;
class HeapBase;
class BasePage;
class LargePage;
class CagedHeap final {
public:
@ -37,6 +40,10 @@ class CagedHeap final {
~(kCagedHeapReservationAlignment - 1);
}
static bool IsWithinNormalPageReservation(const void* address) {
return OffsetFromAddress(address) < kCagedHeapNormalPageReservationSize;
}
CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
~CagedHeap();
@ -47,8 +54,24 @@ class CagedHeap final {
void EnableGenerationalGC();
#endif // defined(CPPGC_YOUNG_GENERATION)
AllocatorType& allocator() { return *bounded_allocator_; }
const AllocatorType& allocator() const { return *bounded_allocator_; }
AllocatorType& normal_page_allocator() {
return *normal_page_bounded_allocator_;
}
const AllocatorType& normal_page_allocator() const {
return *normal_page_bounded_allocator_;
}
AllocatorType& large_page_allocator() {
return *large_page_bounded_allocator_;
}
const AllocatorType& large_page_allocator() const {
return *large_page_bounded_allocator_;
}
void NotifyLargePageCreated(LargePage* page);
void NotifyLargePageDestroyed(LargePage* page);
BasePage* LookupPageFromInnerPointer(void* ptr) const;
CagedHeapLocalData& local_data() {
return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
@ -65,8 +88,12 @@ class CagedHeap final {
void* base() const { return reserved_area_.address(); }
private:
LargePage* LookupLargePageFromInnerPointer(void* ptr) const;
const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
std::unique_ptr<AllocatorType> normal_page_bounded_allocator_;
std::unique_ptr<AllocatorType> large_page_bounded_allocator_;
std::set<LargePage*> large_pages_;
};
} // namespace internal


@ -75,6 +75,11 @@ constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
// TODO(v8:12231): To reduce OOM probability, instead of the fixed-size
// reservation consider using a moving-needle implementation or simply
// calibrating this 2GB/2GB split.
constexpr size_t kCagedHeapNormalPageReservationSize =
kCagedHeapReservationSize / 2;
} // namespace internal
} // namespace cppgc


@ -66,11 +66,12 @@ HeapBase::HeapBase(
#endif // LEAK_SANITIZER
#if defined(CPPGC_CAGED_HEAP)
caged_heap_(*this, *page_allocator()),
page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator(),
*oom_handler_.get())),
page_backend_(std::make_unique<PageBackend>(
caged_heap_.normal_page_allocator(),
caged_heap_.large_page_allocator(), *oom_handler_.get())),
#else // !CPPGC_CAGED_HEAP
page_backend_(std::make_unique<PageBackend>(*page_allocator(),
*oom_handler_.get())),
page_backend_(std::make_unique<PageBackend>(
*page_allocator(), *page_allocator(), *oom_handler_.get())),
#endif // !CPPGC_CAGED_HEAP
stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
@ -158,7 +159,7 @@ void HeapBase::ResetRememberedSet() {
return;
}
caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
caged_heap().local_data().age_table.Reset(page_allocator());
remembered_set_.Reset();
return;
}


@ -40,8 +40,13 @@ BasePage* BasePage::FromInnerAddress(const HeapBase* heap, void* address) {
// static
const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
const void* address) {
#if defined(CPPGC_CAGED_HEAP)
return heap->caged_heap().LookupPageFromInnerPointer(
const_cast<void*>(address));
#else // !defined(CPPGC_CAGED_HEAP)
return reinterpret_cast<const BasePage*>(
heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
#endif // !defined(CPPGC_CAGED_HEAP)
}
// static
@ -231,6 +236,9 @@ LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
void* memory = page_backend.AllocateLargePageMemory(allocation_size);
LargePage* page = new (memory) LargePage(*heap, space, size);
page->SynchronizedStore();
#if defined(CPPGC_CAGED_HEAP)
heap->caged_heap().NotifyLargePageCreated(page);
#endif // defined(CPPGC_CAGED_HEAP)
page->heap().stats_collector()->NotifyAllocatedMemory(allocation_size);
return page;
}
@ -238,6 +246,8 @@ LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
// static
void LargePage::Destroy(LargePage* page) {
DCHECK(page);
HeapBase& heap = page->heap();
const size_t payload_size = page->PayloadSize();
#if DEBUG
const BaseSpace& space = page->space();
{
@ -249,9 +259,11 @@ void LargePage::Destroy(LargePage* page) {
}
#endif // DEBUG
page->~LargePage();
PageBackend* backend = page->heap().page_backend();
page->heap().stats_collector()->NotifyFreedMemory(
AllocationSize(page->PayloadSize()));
PageBackend* backend = heap.page_backend();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().NotifyLargePageDestroyed(page);
#endif // defined(CPPGC_CAGED_HEAP)
heap.stats_collector()->NotifyFreedMemory(AllocationSize(payload_size));
backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
}


@ -186,9 +186,12 @@ std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
return pair;
}
PageBackend::PageBackend(PageAllocator& allocator,
PageBackend::PageBackend(PageAllocator& normal_page_allocator,
PageAllocator& large_page_allocator,
FatalOutOfMemoryHandler& oom_handler)
: allocator_(allocator), oom_handler_(oom_handler) {}
: normal_page_allocator_(normal_page_allocator),
large_page_allocator_(large_page_allocator),
oom_handler_(oom_handler) {}
PageBackend::~PageBackend() = default;
@ -196,8 +199,8 @@ Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
v8::base::MutexGuard guard(&mutex_);
std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
if (!result.first) {
auto pmr =
std::make_unique<NormalPageMemoryRegion>(allocator_, oom_handler_);
auto pmr = std::make_unique<NormalPageMemoryRegion>(normal_page_allocator_,
oom_handler_);
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
page_pool_.Add(bucket, pmr.get(),
pmr->GetPageMemory(i).writeable_region().base());
@ -221,10 +224,10 @@ void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
Address PageBackend::AllocateLargePageMemory(size_t size) {
v8::base::MutexGuard guard(&mutex_);
auto pmr =
std::make_unique<LargePageMemoryRegion>(allocator_, oom_handler_, size);
auto pmr = std::make_unique<LargePageMemoryRegion>(large_page_allocator_,
oom_handler_, size);
const PageMemory pm = pmr->GetPageMemory();
Unprotect(allocator_, oom_handler_, pm);
Unprotect(large_page_allocator_, oom_handler_, pm);
page_memory_region_tree_.Add(pmr.get());
large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
return pm.writeable_region().base();


@ -198,7 +198,8 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
// regions alive.
class V8_EXPORT_PRIVATE PageBackend final {
public:
PageBackend(PageAllocator&, FatalOutOfMemoryHandler&);
PageBackend(PageAllocator& normal_page_allocator,
PageAllocator& large_page_allocator, FatalOutOfMemoryHandler&);
~PageBackend();
// Allocates a normal page from the backend.
@ -230,7 +231,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
private:
// Guards against concurrent uses of `Lookup()`.
mutable v8::base::Mutex mutex_;
PageAllocator& allocator_;
PageAllocator& normal_page_allocator_;
PageAllocator& large_page_allocator_;
FatalOutOfMemoryHandler& oom_handler_;
NormalPageMemoryPool page_pool_;
PageMemoryRegionTree page_memory_region_tree_;


@ -269,7 +269,7 @@ TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
TEST(PageBackendTest, AllocateNormalUsesPool) {
v8::base::PageAllocator allocator;
FatalOutOfMemoryHandler oom_handler;
PageBackend backend(allocator, oom_handler);
PageBackend backend(allocator, allocator, oom_handler);
constexpr size_t kBucket = 0;
Address writeable_base1 = backend.AllocateNormalPageMemory(kBucket);
EXPECT_NE(nullptr, writeable_base1);
@ -282,7 +282,7 @@ TEST(PageBackendTest, AllocateNormalUsesPool) {
TEST(PageBackendTest, AllocateLarge) {
v8::base::PageAllocator allocator;
FatalOutOfMemoryHandler oom_handler;
PageBackend backend(allocator, oom_handler);
PageBackend backend(allocator, allocator, oom_handler);
Address writeable_base1 = backend.AllocateLargePageMemory(13731);
EXPECT_NE(nullptr, writeable_base1);
Address writeable_base2 = backend.AllocateLargePageMemory(9478);
@ -295,7 +295,7 @@ TEST(PageBackendTest, AllocateLarge) {
TEST(PageBackendTest, LookupNormal) {
v8::base::PageAllocator allocator;
FatalOutOfMemoryHandler oom_handler;
PageBackend backend(allocator, oom_handler);
PageBackend backend(allocator, allocator, oom_handler);
constexpr size_t kBucket = 0;
Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
if (kGuardPageSize) {
@ -316,7 +316,7 @@ TEST(PageBackendTest, LookupNormal) {
TEST(PageBackendTest, LookupLarge) {
v8::base::PageAllocator allocator;
FatalOutOfMemoryHandler oom_handler;
PageBackend backend(allocator, oom_handler);
PageBackend backend(allocator, allocator, oom_handler);
constexpr size_t kSize = 7934;
Address writeable_base = backend.AllocateLargePageMemory(kSize);
if (kGuardPageSize) {
@ -332,7 +332,7 @@ TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
FatalOutOfMemoryHandler oom_handler;
Address base;
{
PageBackend backend(allocator, oom_handler);
PageBackend backend(allocator, allocator, oom_handler);
constexpr size_t kBucket = 0;
base = backend.AllocateNormalPageMemory(kBucket);
}