cppgc: shared-cage: Use dedicated reservations for normal/large pages

This CL splits the Oilpan giga-cage into two 2GB reservations: one for
normal pages and the other for large ones. The split enables fast
page-header lookup (assuming most objects reside on normal pages; see
the sketch below), which is needed for:
1) the young-generation project, where the remembered set will move to
   pages;
2) the shared-cage project, to find HeapBase* from page headers.
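
A minimal standalone sketch of the lookup scheme this split enables (not
part of the CL's diff; all names here, such as sketch::CagedHeap, kCageSize,
and LookupPage, are simplified stand-ins for the real cppgc types in the
files below, assuming a cage aligned to its own size and kPageSize-aligned
normal pages):

// lookup_sketch.cc -- illustrative stand-ins, not the cppgc API.
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <set>

namespace sketch {

constexpr size_t kGB = size_t{1} << 30;
constexpr size_t kCageSize = 4 * kGB;                         // whole cage
constexpr size_t kNormalPageReservationSize = kCageSize / 2;  // first 2GB
constexpr size_t kPageSize = size_t{1} << 17;                 // normal pages

struct BasePage {};
struct LargePage : BasePage {};

struct CagedHeap {
  uintptr_t cage_base;               // start of the whole reservation
  std::set<LargePage*> large_pages;  // ordered by address

  // One subtraction and one comparison decide which half a pointer is in.
  bool IsWithinNormalPageReservation(const void* ptr) const {
    return reinterpret_cast<uintptr_t>(ptr) - cage_base <
           kNormalPageReservationSize;
  }

  BasePage* LookupPage(void* ptr) const {
    if (IsWithinNormalPageReservation(ptr)) {
      // Fast path: normal pages are kPageSize-aligned, so the page header
      // is found by masking off the low bits -- no search needed.
      return reinterpret_cast<BasePage*>(
          reinterpret_cast<uintptr_t>(ptr) & ~(kPageSize - 1));
    }
    // Slow path: binary-search the ordered large-page set. upper_bound()
    // yields the first page starting after ptr; its predecessor covers ptr.
    auto it = large_pages.upper_bound(static_cast<LargePage*>(ptr));
    return *std::prev(it);
  }
};

}  // namespace sketch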

Bug: v8:12231, chromium:1029379
Change-Id: I4ae9e8a75a307ed0dff9a2ec4f1247b80e17ebd9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3688519
Auto-Submit: Anton Bikineev <bikineev@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Anton Bikineev <bikineev@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80950}
Author: Anton Bikineev <bikineev@chromium.org>
Date:   2022-06-05 15:30:41 +02:00 (committed by V8 LUCI CQ)
Parent: ff64360d0a
Commit: d8da11ae05

9 changed files with 126 additions and 27 deletions

include/cppgc/internal/api-constants.h

@@ -37,6 +37,9 @@ static constexpr size_t kLargeObjectSizeThreshold = kPageSize / 2;
 #if defined(CPPGC_CAGED_HEAP)
 constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
 constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+constexpr size_t kCagedHeapNormalPageReservationSize =
+    kCagedHeapReservationSize / 2;
 #endif

 static constexpr size_t kDefaultAlignment = sizeof(void*);

src/heap/cppgc/caged-heap.cc

@@ -16,6 +16,7 @@
 #include "src/base/platform/platform.h"
 #include "src/heap/cppgc/caged-heap.h"
 #include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/heap-page.h"
 #include "src/heap/cppgc/member.h"

 namespace cppgc {
@@ -25,6 +26,8 @@ static_assert(api_constants::kCagedHeapReservationSize ==
               kCagedHeapReservationSize);
 static_assert(api_constants::kCagedHeapReservationAlignment ==
               kCagedHeapReservationAlignment);
+static_assert(api_constants::kCagedHeapNormalPageReservationSize ==
+              kCagedHeapNormalPageReservationSize);

 namespace {
@@ -78,9 +81,20 @@ CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
       caged_heap_start -
       reinterpret_cast<CagedAddress>(reserved_area_.address());

-  bounded_allocator_ = std::make_unique<v8::base::BoundedPageAllocator>(
-      &platform_allocator, caged_heap_start,
-      reserved_area_.size() - local_data_size_with_padding, kPageSize,
+  normal_page_bounded_allocator_ = std::make_unique<
+      v8::base::BoundedPageAllocator>(
+      &platform_allocator, caged_heap_start,
+      kCagedHeapNormalPageReservationSize - local_data_size_with_padding,
+      kPageSize,
+      v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
+      v8::base::PageFreeingMode::kMakeInaccessible);
+
+  large_page_bounded_allocator_ = std::make_unique<
+      v8::base::BoundedPageAllocator>(
+      &platform_allocator,
+      reinterpret_cast<uintptr_t>(reserved_area_.address()) +
+          kCagedHeapNormalPageReservationSize,
+      kCagedHeapNormalPageReservationSize, kPageSize,
       v8::base::PageInitializationMode::kAllocatedPagesMustBeZeroInitialized,
       v8::base::PageFreeingMode::kMakeInaccessible);
 }
@@ -99,5 +113,37 @@ void CagedHeap::EnableGenerationalGC() {
 }
 #endif  // defined(CPPGC_YOUNG_GENERATION)

+void CagedHeap::NotifyLargePageCreated(LargePage* page) {
+  DCHECK(page);
+  auto result = large_pages_.insert(page);
+  USE(result);
+  DCHECK(result.second);
+}
+
+void CagedHeap::NotifyLargePageDestroyed(LargePage* page) {
+  DCHECK(page);
+  auto size = large_pages_.erase(page);
+  USE(size);
+  DCHECK_EQ(1u, size);
+}
+
+BasePage* CagedHeap::LookupPageFromInnerPointer(void* ptr) const {
+  DCHECK(IsOnHeap(ptr));
+  if (V8_LIKELY(IsWithinNormalPageReservation(ptr))) {
+    return NormalPage::FromPayload(ptr);
+  } else {
+    return LookupLargePageFromInnerPointer(ptr);
+  }
+}
+
+LargePage* CagedHeap::LookupLargePageFromInnerPointer(void* ptr) const {
+  auto it = large_pages_.upper_bound(static_cast<LargePage*>(ptr));
+  DCHECK_NE(large_pages_.begin(), it);
+  auto* page = *std::next(it, -1);
+  DCHECK(page);
+  DCHECK(page->PayloadContains(static_cast<ConstAddress>(ptr)));
+  return page;
+}
+
 }  // namespace internal
 }  // namespace cppgc

src/heap/cppgc/caged-heap.h

@@ -7,6 +7,7 @@

 #include <limits>
 #include <memory>
+#include <set>

 #include "include/cppgc/platform.h"
 #include "src/base/bounded-page-allocator.h"
@@ -18,6 +19,8 @@ namespace internal {
 struct CagedHeapLocalData;
 class HeapBase;
+class BasePage;
+class LargePage;

 class CagedHeap final {
  public:
@@ -37,6 +40,10 @@
            ~(kCagedHeapReservationAlignment - 1);
   }

+  static bool IsWithinNormalPageReservation(const void* address) {
+    return OffsetFromAddress(address) < kCagedHeapNormalPageReservationSize;
+  }
+
   CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
   ~CagedHeap();
@@ -47,8 +54,24 @@
   void EnableGenerationalGC();
 #endif  // defined(CPPGC_YOUNG_GENERATION)

-  AllocatorType& allocator() { return *bounded_allocator_; }
-  const AllocatorType& allocator() const { return *bounded_allocator_; }
+  AllocatorType& normal_page_allocator() {
+    return *normal_page_bounded_allocator_;
+  }
+  const AllocatorType& normal_page_allocator() const {
+    return *normal_page_bounded_allocator_;
+  }
+
+  AllocatorType& large_page_allocator() {
+    return *large_page_bounded_allocator_;
+  }
+  const AllocatorType& large_page_allocator() const {
+    return *large_page_bounded_allocator_;
+  }
+
+  void NotifyLargePageCreated(LargePage* page);
+  void NotifyLargePageDestroyed(LargePage* page);
+
+  BasePage* LookupPageFromInnerPointer(void* ptr) const;

   CagedHeapLocalData& local_data() {
     return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
@@ -65,8 +88,12 @@
   void* base() const { return reserved_area_.address(); }

  private:
+  LargePage* LookupLargePageFromInnerPointer(void* ptr) const;
+
   const VirtualMemory reserved_area_;
-  std::unique_ptr<AllocatorType> bounded_allocator_;
+  std::unique_ptr<AllocatorType> normal_page_bounded_allocator_;
+  std::unique_ptr<AllocatorType> large_page_bounded_allocator_;
+  std::set<LargePage*> large_pages_;
 };

 }  // namespace internal

src/heap/cppgc/globals.h

@@ -75,6 +75,11 @@ constexpr size_t kFreeListEntrySize = 2 * sizeof(uintptr_t);
 constexpr size_t kCagedHeapReservationSize = static_cast<size_t>(4) * kGB;
 constexpr size_t kCagedHeapReservationAlignment = kCagedHeapReservationSize;
+// TODO(v8:12231): To reduce the OOM probability, instead of the fixed-size
+// reservation consider using a moving needle implementation or simply
+// calibrating this 2GB/2GB split.
+constexpr size_t kCagedHeapNormalPageReservationSize =
+    kCagedHeapReservationSize / 2;

 }  // namespace internal
 }  // namespace cppgc

src/heap/cppgc/heap-base.cc

@@ -66,11 +66,12 @@ HeapBase::HeapBase(
 #endif  // LEAK_SANITIZER
 #if defined(CPPGC_CAGED_HEAP)
       caged_heap_(*this, *page_allocator()),
-      page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator(),
-                                                  *oom_handler_.get())),
+      page_backend_(std::make_unique<PageBackend>(
+          caged_heap_.normal_page_allocator(),
+          caged_heap_.large_page_allocator(), *oom_handler_.get())),
 #else  // !CPPGC_CAGED_HEAP
-      page_backend_(std::make_unique<PageBackend>(*page_allocator(),
-                                                  *oom_handler_.get())),
+      page_backend_(std::make_unique<PageBackend>(
+          *page_allocator(), *page_allocator(), *oom_handler_.get())),
 #endif  // !CPPGC_CAGED_HEAP
       stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
       stack_(std::make_unique<heap::base::Stack>(
@@ -158,7 +159,7 @@ void HeapBase::ResetRememberedSet() {
     return;
   }

-  caged_heap().local_data().age_table.Reset(&caged_heap().allocator());
+  caged_heap().local_data().age_table.Reset(page_allocator());
   remembered_set_.Reset();
   return;
 }

src/heap/cppgc/heap-page.cc

@@ -40,8 +40,13 @@ BasePage* BasePage::FromInnerAddress(const HeapBase* heap, void* address) {
 // static
 const BasePage* BasePage::FromInnerAddress(const HeapBase* heap,
                                            const void* address) {
+#if defined(CPPGC_CAGED_HEAP)
+  return heap->caged_heap().LookupPageFromInnerPointer(
+      const_cast<void*>(address));
+#else  // !defined(CPPGC_CAGED_HEAP)
   return reinterpret_cast<const BasePage*>(
       heap->page_backend()->Lookup(static_cast<ConstAddress>(address)));
+#endif  // !defined(CPPGC_CAGED_HEAP)
 }

 // static
@@ -231,6 +236,9 @@ LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
   void* memory = page_backend.AllocateLargePageMemory(allocation_size);
   LargePage* page = new (memory) LargePage(*heap, space, size);
   page->SynchronizedStore();
+#if defined(CPPGC_CAGED_HEAP)
+  heap->caged_heap().NotifyLargePageCreated(page);
+#endif  // defined(CPPGC_CAGED_HEAP)
   page->heap().stats_collector()->NotifyAllocatedMemory(allocation_size);
   return page;
 }
@@ -238,6 +246,8 @@ LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
 // static
 void LargePage::Destroy(LargePage* page) {
   DCHECK(page);
+  HeapBase& heap = page->heap();
+  const size_t payload_size = page->PayloadSize();
 #if DEBUG
   const BaseSpace& space = page->space();
   {
@@ -249,9 +259,11 @@ void LargePage::Destroy(LargePage* page) {
   }
 #endif  // DEBUG
   page->~LargePage();
-  PageBackend* backend = page->heap().page_backend();
-  page->heap().stats_collector()->NotifyFreedMemory(
-      AllocationSize(page->PayloadSize()));
+  PageBackend* backend = heap.page_backend();
+#if defined(CPPGC_CAGED_HEAP)
+  heap.caged_heap().NotifyLargePageDestroyed(page);
+#endif  // defined(CPPGC_CAGED_HEAP)
+  heap.stats_collector()->NotifyFreedMemory(AllocationSize(payload_size));
   backend->FreeLargePageMemory(reinterpret_cast<Address>(page));
 }

src/heap/cppgc/page-memory.cc

@@ -186,9 +186,12 @@ std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
   return pair;
 }

-PageBackend::PageBackend(PageAllocator& allocator,
+PageBackend::PageBackend(PageAllocator& normal_page_allocator,
+                         PageAllocator& large_page_allocator,
                          FatalOutOfMemoryHandler& oom_handler)
-    : allocator_(allocator), oom_handler_(oom_handler) {}
+    : normal_page_allocator_(normal_page_allocator),
+      large_page_allocator_(large_page_allocator),
+      oom_handler_(oom_handler) {}

 PageBackend::~PageBackend() = default;
@@ -196,8 +199,8 @@ Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
   v8::base::MutexGuard guard(&mutex_);
   std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
   if (!result.first) {
-    auto pmr =
-        std::make_unique<NormalPageMemoryRegion>(allocator_, oom_handler_);
+    auto pmr = std::make_unique<NormalPageMemoryRegion>(normal_page_allocator_,
+                                                        oom_handler_);
     for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
       page_pool_.Add(bucket, pmr.get(),
                      pmr->GetPageMemory(i).writeable_region().base());
@@ -221,10 +224,10 @@ void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {

 Address PageBackend::AllocateLargePageMemory(size_t size) {
   v8::base::MutexGuard guard(&mutex_);
-  auto pmr =
-      std::make_unique<LargePageMemoryRegion>(allocator_, oom_handler_, size);
+  auto pmr = std::make_unique<LargePageMemoryRegion>(large_page_allocator_,
+                                                     oom_handler_, size);
   const PageMemory pm = pmr->GetPageMemory();
-  Unprotect(allocator_, oom_handler_, pm);
+  Unprotect(large_page_allocator_, oom_handler_, pm);
   page_memory_region_tree_.Add(pmr.get());
   large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
   return pm.writeable_region().base();

src/heap/cppgc/page-memory.h

@@ -198,7 +198,8 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
 // regions alive.
 class V8_EXPORT_PRIVATE PageBackend final {
  public:
-  PageBackend(PageAllocator&, FatalOutOfMemoryHandler&);
+  PageBackend(PageAllocator& normal_page_allocator,
+              PageAllocator& large_page_allocator, FatalOutOfMemoryHandler&);
   ~PageBackend();

   // Allocates a normal page from the backend.
@@ -230,7 +231,8 @@
  private:
   // Guards against concurrent uses of `Lookup()`.
   mutable v8::base::Mutex mutex_;
-  PageAllocator& allocator_;
+  PageAllocator& normal_page_allocator_;
+  PageAllocator& large_page_allocator_;
   FatalOutOfMemoryHandler& oom_handler_;
   NormalPageMemoryPool page_pool_;
   PageMemoryRegionTree page_memory_region_tree_;

test/unittests/heap/cppgc/page-memory-unittest.cc

@@ -269,7 +269,7 @@ TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
 TEST(PageBackendTest, AllocateNormalUsesPool) {
   v8::base::PageAllocator allocator;
   FatalOutOfMemoryHandler oom_handler;
-  PageBackend backend(allocator, oom_handler);
+  PageBackend backend(allocator, allocator, oom_handler);
   constexpr size_t kBucket = 0;
   Address writeable_base1 = backend.AllocateNormalPageMemory(kBucket);
   EXPECT_NE(nullptr, writeable_base1);
@@ -282,7 +282,7 @@ TEST(PageBackendTest, AllocateNormalUsesPool) {
 TEST(PageBackendTest, AllocateLarge) {
   v8::base::PageAllocator allocator;
   FatalOutOfMemoryHandler oom_handler;
-  PageBackend backend(allocator, oom_handler);
+  PageBackend backend(allocator, allocator, oom_handler);
   Address writeable_base1 = backend.AllocateLargePageMemory(13731);
   EXPECT_NE(nullptr, writeable_base1);
   Address writeable_base2 = backend.AllocateLargePageMemory(9478);
@@ -295,7 +295,7 @@ TEST(PageBackendTest, AllocateLarge) {
 TEST(PageBackendTest, LookupNormal) {
   v8::base::PageAllocator allocator;
   FatalOutOfMemoryHandler oom_handler;
-  PageBackend backend(allocator, oom_handler);
+  PageBackend backend(allocator, allocator, oom_handler);
   constexpr size_t kBucket = 0;
   Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
   if (kGuardPageSize) {
@@ -316,7 +316,7 @@ TEST(PageBackendTest, LookupNormal) {
 TEST(PageBackendTest, LookupLarge) {
   v8::base::PageAllocator allocator;
   FatalOutOfMemoryHandler oom_handler;
-  PageBackend backend(allocator, oom_handler);
+  PageBackend backend(allocator, allocator, oom_handler);
   constexpr size_t kSize = 7934;
   Address writeable_base = backend.AllocateLargePageMemory(kSize);
   if (kGuardPageSize) {
@@ -332,7 +332,7 @@ TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
   FatalOutOfMemoryHandler oom_handler;
   Address base;
   {
-    PageBackend backend(allocator, oom_handler);
+    PageBackend backend(allocator, allocator, oom_handler);
     constexpr size_t kBucket = 0;
     base = backend.AllocateNormalPageMemory(kBucket);
   }