[cppgc, cppgc-js] Implement GC on allocation failure
So far, Oilpan garbage collection was only ever triggered via the growing
strategies in either V8 or the stand-alone heap growing. This CL implements a
fallback GC on allocation failure:
- The stand-alone implementation defers to GCInvoker, which is aware of stack
  support.
- The CppHeap implementation just triggers a full V8 GC.

Bug: chromium:1352649
Change-Id: If92f705b4e272290ca7022864fd7b90f0fcb809e
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3865148
Reviewed-by: Anton Bikineev <bikineev@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82844}
Commit: 881fc0496c
Parent: 6229eee365
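For orientation, here is a minimal, self-contained sketch of the fallback this change wires into the allocator slow path: try the allocation, trigger one garbage collection on failure, retry, and only report a fatal out-of-memory condition if the retry also fails. The PageBackend, GarbageCollector, and FatalOutOfMemory names below are simplified stand-ins for the cppgc types touched in the diff, not the real classes.

// Minimal sketch of the allocation-failure fallback (simplified stand-ins,
// not the actual cppgc classes).
#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct GarbageCollector {
  // Stand-in for GarbageCollector::CollectGarbage() with
  // FreeMemoryHandling::kDiscardWherePossible.
  void CollectGarbage() { std::puts("GC: full collection, discard free memory"); }
};

struct PageBackend {
  // Stand-in for PageBackend::TryAllocateNormalPageMemory(); may return null.
  void* TryAllocatePage(std::size_t size) { return std::malloc(size); }
};

[[noreturn]] void FatalOutOfMemory(const char* reason) {
  std::fprintf(stderr, "Oilpan: %s\n", reason);
  std::abort();
}

// Mirrors the new control flow in ObjectAllocator::OutOfLineAllocateImpl:
// try, collect garbage once, retry, then fail hard.
void* AllocateWithGCFallback(PageBackend& backend, GarbageCollector& gc,
                             std::size_t size) {
  if (void* memory = backend.TryAllocatePage(size)) return memory;
  gc.CollectGarbage();  // Fallback GC on allocation failure.
  if (void* memory = backend.TryAllocatePage(size)) return memory;
  FatalOutOfMemory("Normal allocation.");
}

int main() {
  PageBackend backend;
  GarbageCollector gc;
  void* page = AllocateWithGCFallback(backend, gc, 4096);
  std::free(page);
}

In the actual change, the stand-alone cppgc heap routes this fallback through GCInvoker (which knows whether conservative stack scanning is possible), while CppHeap forwards it to a full V8 GC with kCppHeapAllocationFailure as the reason; the diff below shows both paths.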
@@ -855,7 +855,7 @@ class BackingStoreBase {};
 
 // The maximum value in enum GarbageCollectionReason, defined in heap.h.
 // This is needed for histograms sampling garbage collection reasons.
-constexpr int kGarbageCollectionReasonMaxValue = 25;
+constexpr int kGarbageCollectionReasonMaxValue = 27;
 
 }  // namespace internal
@@ -47,6 +47,7 @@
 #include "src/heap/embedder-tracing.h"
 #include "src/heap/gc-tracer.h"
 #include "src/heap/global-handle-marking-visitor.h"
+#include "src/heap/heap.h"
 #include "src/heap/marking-worklist.h"
 #include "src/heap/sweeper.h"
 #include "src/init/v8.h"
@@ -485,7 +486,7 @@ CppHeap::CppHeap(
           std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
           cppgc::internal::HeapBase::StackSupport::
               kSupportsConservativeStackScan,
-          marking_support, sweeping_support),
+          marking_support, sweeping_support, *this),
       wrapper_descriptor_(wrapper_descriptor) {
   CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
            wrapper_descriptor_.embedder_id_for_garbage_collected);
@@ -1004,5 +1005,24 @@ CppHeap::PauseConcurrentMarkingScope::PauseConcurrentMarkingScope(
   }
 }
 
+void CppHeap::CollectGarbage(Config config) {
+  if (in_no_gc_scope() || !isolate_) return;
+
+  // TODO(mlippautz): Respect full config.
+  const int flags = (config.free_memory_handling ==
+                     Config::FreeMemoryHandling::kDiscardWherePossible)
+                        ? Heap::kReduceMemoryFootprintMask
+                        : Heap::kNoGCFlags;
+  isolate_->heap()->CollectAllGarbage(
+      flags, GarbageCollectionReason::kCppHeapAllocationFailure);
+}
+
+const cppgc::EmbedderStackState* CppHeap::override_stack_state() const {
+  return HeapBase::override_stack_state();
+}
+
+void CppHeap::StartIncrementalGarbageCollection(Config) { UNIMPLEMENTED(); }
+size_t CppHeap::epoch() const { UNIMPLEMENTED(); }
+
 }  // namespace internal
 }  // namespace v8
@@ -33,7 +33,8 @@ class CppMarkingState;
 class V8_EXPORT_PRIVATE CppHeap final
     : public cppgc::internal::HeapBase,
       public v8::CppHeap,
-      public cppgc::internal::StatsCollector::AllocationObserver {
+      public cppgc::internal::StatsCollector::AllocationObserver,
+      public cppgc::internal::GarbageCollector {
  public:
   enum GarbageCollectionFlagValues : uint8_t {
     kNoFlags = 0,
@@ -166,6 +167,12 @@ class V8_EXPORT_PRIVATE CppHeap final
   std::unique_ptr<CppMarkingState> CreateCppMarkingState();
   std::unique_ptr<CppMarkingState> CreateCppMarkingStateForMutatorThread();
 
+  // cppgc::internal::GarbageCollector interface.
+  void CollectGarbage(Config) override;
+  const cppgc::EmbedderStackState* override_stack_state() const override;
+  void StartIncrementalGarbageCollection(Config) override;
+  size_t epoch() const override;
+
  private:
   void ReduceGCCapabilititesFromFlags();
 
@@ -96,7 +96,7 @@ HeapBase::HeapBase(
     std::shared_ptr<cppgc::Platform> platform,
     const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
     StackSupport stack_support, MarkingType marking_support,
-    SweepingType sweeping_support)
+    SweepingType sweeping_support, GarbageCollector& garbage_collector)
     : raw_heap_(this, custom_spaces),
       platform_(std::move(platform)),
       oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
@@ -111,7 +111,8 @@ HeapBase::HeapBase(
       prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
       compactor_(raw_heap_),
       object_allocator_(raw_heap_, *page_backend_, *stats_collector_,
-                        *prefinalizer_handler_),
+                        *prefinalizer_handler_, *oom_handler_,
+                        garbage_collector),
       sweeper_(*this),
       strong_persistent_region_(*oom_handler_.get()),
       weak_persistent_region_(*oom_handler_.get()),
@@ -15,7 +15,6 @@
 #include "include/cppgc/macros.h"
 #include "src/base/macros.h"
 #include "src/heap/cppgc/compactor.h"
-#include "src/heap/cppgc/garbage-collector.h"
 #include "src/heap/cppgc/heap-object-header.h"
 #include "src/heap/cppgc/marker.h"
 #include "src/heap/cppgc/metric-recorder.h"
@@ -60,6 +59,7 @@ class Platform;
 namespace internal {
 
 class FatalOutOfMemoryHandler;
+class GarbageCollector;
 class PageBackend;
 class PreFinalizerHandler;
 class StatsCollector;
@@ -83,7 +83,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   HeapBase(std::shared_ptr<cppgc::Platform> platform,
            const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
            StackSupport stack_support, MarkingType marking_support,
-           SweepingType sweeping_support);
+           SweepingType sweeping_support, GarbageCollector& garbage_collector);
   virtual ~HeapBase();
 
   HeapBase(const HeapBase&) = delete;
@@ -5,6 +5,7 @@
 #include "src/heap/cppgc/heap-page.h"
 
 #include <algorithm>
+#include <cstddef>
 
 #include "include/cppgc/internal/api-constants.h"
 #include "src/base/logging.h"
@@ -132,9 +133,11 @@ BasePage::BasePage(HeapBase& heap, BaseSpace& space, PageType type)
 }
 
 // static
-NormalPage* NormalPage::Create(PageBackend& page_backend,
-                               NormalPageSpace& space) {
-  void* memory = page_backend.AllocateNormalPageMemory();
+NormalPage* NormalPage::TryCreate(PageBackend& page_backend,
+                                  NormalPageSpace& space) {
+  void* memory = page_backend.TryAllocateNormalPageMemory();
+  if (!memory) return nullptr;
+
   auto* normal_page = new (memory) NormalPage(*space.raw_heap()->heap(), space);
   normal_page->SynchronizedStore();
   normal_page->heap().stats_collector()->NotifyAllocatedMemory(kPageSize);
@@ -226,8 +229,8 @@ size_t LargePage::AllocationSize(size_t payload_size) {
 }
 
 // static
-LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
-                             size_t size) {
+LargePage* LargePage::TryCreate(PageBackend& page_backend,
+                                LargePageSpace& space, size_t size) {
   // Ensure that the API-provided alignment guarantees does not violate the
   // internally guaranteed alignment of large page allocations.
   static_assert(kGuaranteedObjectAlignment <=
@@ -239,7 +242,9 @@ LargePage* LargePage::Create(PageBackend& page_backend, LargePageSpace& space,
   const size_t allocation_size = AllocationSize(size);
 
   auto* heap = space.raw_heap()->heap();
-  void* memory = page_backend.AllocateLargePageMemory(allocation_size);
+  void* memory = page_backend.TryAllocateLargePageMemory(allocation_size);
+  if (!memory) return nullptr;
+
   LargePage* page = new (memory) LargePage(*heap, space, size);
   page->SynchronizedStore();
 #if defined(CPPGC_CAGED_HEAP)
@@ -151,7 +151,7 @@ class V8_EXPORT_PRIVATE NormalPage final : public BasePage {
   using const_iterator = IteratorImpl<const HeapObjectHeader>;
 
   // Allocates a new page in the detached state.
-  static NormalPage* Create(PageBackend&, NormalPageSpace&);
+  static NormalPage* TryCreate(PageBackend&, NormalPageSpace&);
   // Destroys and frees the page. The page must be detached from the
   // corresponding space (i.e. be swept when called).
   static void Destroy(NormalPage*);
@@ -221,7 +221,7 @@ class V8_EXPORT_PRIVATE LargePage final : public BasePage {
   // Returns the allocation size required for a payload of size |size|.
   static size_t AllocationSize(size_t size);
   // Allocates a new page in the detached state.
-  static LargePage* Create(PageBackend&, LargePageSpace&, size_t);
+  static LargePage* TryCreate(PageBackend&, LargePageSpace&, size_t);
   // Destroys and frees the page. The page must be detached from the
   // corresponding space (i.e. be swept when called).
   static void Destroy(LargePage*);
@@ -79,7 +79,7 @@ void CheckConfig(Heap::Config config, HeapBase::MarkingType marking_support,
 Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
            cppgc::Heap::HeapOptions options)
     : HeapBase(platform, options.custom_spaces, options.stack_support,
-               options.marking_support, options.sweeping_support),
+               options.marking_support, options.sweeping_support, gc_invoker_),
       gc_invoker_(this, platform_.get(), options.stack_support),
       growing_(&gc_invoker_, stats_collector_.get(),
                options.resource_constraints, options.marking_support,
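The hunk above wires the stand-alone heap's gc_invoker_ in as the GarbageCollector that the allocator falls back to. Per the commit message, GCInvoker is used because it is aware of stack support: when conservative stack scanning is unavailable it cannot safely start a GC right at the allocation site and has to schedule a precise collection instead. A rough sketch of that decision, using invented names rather than the actual GCInvoker implementation (see src/heap/cppgc/gc-invoker.h for the real one):

// Rough sketch: a stack-aware invoker either collects immediately (the stack
// may be scanned conservatively) or defers a precise GC to a posted task.
// All names are invented for illustration.
#include <functional>
#include <queue>

enum class StackSupport {
  kSupportsConservativeStackScan,
  kNoConservativeStackScan,
};

struct Collector {
  void CollectAtomically() {}      // may scan the native stack
  void CollectPreciseNoStack() {}  // must not rely on stack references
};

class StackAwareInvoker {
 public:
  StackAwareInvoker(Collector& collector, StackSupport support)
      : collector_(collector), support_(support) {}

  // Called from the allocation slow path, i.e. while Oilpan objects may still
  // be referenced from the current C++ stack.
  void CollectGarbage(std::queue<std::function<void()>>& tasks) {
    if (support_ == StackSupport::kSupportsConservativeStackScan) {
      collector_.CollectAtomically();
    } else {
      // Defer to a point where no Oilpan references live on the stack.
      tasks.push([this] { collector_.CollectPreciseNoStack(); });
    }
  }

 private:
  Collector& collector_;
  StackSupport support_;
};

int main() {
  Collector collector;
  std::queue<std::function<void()>> tasks;
  StackAwareInvoker invoker(collector, StackSupport::kNoConservativeStackScan);
  invoker.CollectGarbage(tasks);  // Deferred.
  while (!tasks.empty()) {
    tasks.front()();
    tasks.pop();
  }
}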
@@ -17,6 +17,7 @@
 #include "src/heap/cppgc/memory.h"
 #include "src/heap/cppgc/object-start-bitmap.h"
 #include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/platform.h"
 #include "src/heap/cppgc/prefinalizer-handler.h"
 #include "src/heap/cppgc/stats-collector.h"
 #include "src/heap/cppgc/sweeper.h"
@@ -79,10 +80,12 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
   }
 }
 
-void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
-                          StatsCollector& stats_collector, size_t size,
-                          GCInfoIndex gcinfo) {
-  LargePage* page = LargePage::Create(page_backend, space, size);
+void* TryAllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
+                             StatsCollector& stats_collector, size_t size,
+                             GCInfoIndex gcinfo) {
+  LargePage* page = LargePage::TryCreate(page_backend, space, size);
+  if (!page) return nullptr;
+
   space.AddPage(page);
 
   auto* header = new (page->ObjectHeader())
@@ -100,11 +103,15 @@ constexpr size_t ObjectAllocator::kSmallestSpaceSize;
 
 ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
                                  StatsCollector& stats_collector,
-                                 PreFinalizerHandler& prefinalizer_handler)
+                                 PreFinalizerHandler& prefinalizer_handler,
+                                 FatalOutOfMemoryHandler& oom_handler,
+                                 GarbageCollector& garbage_collector)
     : raw_heap_(heap),
       page_backend_(page_backend),
       stats_collector_(stats_collector),
-      prefinalizer_handler_(prefinalizer_handler) {}
+      prefinalizer_handler_(prefinalizer_handler),
+      oom_handler_(oom_handler),
+      garbage_collector_(garbage_collector) {}
 
 void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
                                          AlignVal alignment,
@@ -138,8 +145,20 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
         *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
     // LargePage has a natural alignment that already satisfies
    // `kMaxSupportedAlignment`.
-    return AllocateLargeObject(page_backend_, large_space, stats_collector_,
-                               size, gcinfo);
+    void* result = TryAllocateLargeObject(page_backend_, large_space,
+                                          stats_collector_, size, gcinfo);
+    if (!result) {
+      auto config = GarbageCollector::Config::ConservativeAtomicConfig();
+      config.free_memory_handling =
+          GarbageCollector::Config::FreeMemoryHandling::kDiscardWherePossible;
+      garbage_collector_.CollectGarbage(config);
+      result = TryAllocateLargeObject(page_backend_, large_space,
+                                      stats_collector_, size, gcinfo);
+      if (!result) {
+        oom_handler_("Oilpan: Large allocation.");
+      }
+    }
+    return result;
   }
 
   size_t request_size = size;
@@ -150,7 +169,15 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
     request_size += kAllocationGranularity;
   }
 
-  RefillLinearAllocationBuffer(space, request_size);
+  if (!TryRefillLinearAllocationBuffer(space, request_size)) {
+    auto config = GarbageCollector::Config::ConservativeAtomicConfig();
+    config.free_memory_handling =
+        GarbageCollector::Config::FreeMemoryHandling::kDiscardWherePossible;
+    garbage_collector_.CollectGarbage(config);
+    if (!TryRefillLinearAllocationBuffer(space, request_size)) {
+      oom_handler_("Oilpan: Normal allocation.");
+    }
+  }
 
   // The allocation must succeed, as we just refilled the LAB.
   void* result = (dynamic_alignment == kAllocationGranularity)
@@ -160,10 +187,10 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
   return result;
 }
 
-void ObjectAllocator::RefillLinearAllocationBuffer(NormalPageSpace& space,
-                                                   size_t size) {
+bool ObjectAllocator::TryRefillLinearAllocationBuffer(NormalPageSpace& space,
+                                                      size_t size) {
   // Try to allocate from the freelist.
-  if (RefillLinearAllocationBufferFromFreeList(space, size)) return;
+  if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
 
   // Lazily sweep pages of this heap until we find a freed area for this
   // allocation or we finish sweeping all pages of this heap.
@@ -179,22 +206,26 @@ void ObjectAllocator::RefillLinearAllocationBuffer(NormalPageSpace& space,
     // may only potentially fit the block. For the bucket that may exactly fit
     // the allocation of `size` bytes (no overallocation), only the first
     // entry is checked.
-    if (RefillLinearAllocationBufferFromFreeList(space, size)) return;
+    if (TryRefillLinearAllocationBufferFromFreeList(space, size)) return true;
   }
 
   sweeper.FinishIfRunning();
   // TODO(chromium:1056170): Make use of the synchronously freed memory.
 
-  auto* new_page = NormalPage::Create(page_backend_, space);
-  space.AddPage(new_page);
+  auto* new_page = NormalPage::TryCreate(page_backend_, space);
+  if (!new_page) {
+    return false;
+  }
 
+  space.AddPage(new_page);
   // Set linear allocation buffer to new page.
   ReplaceLinearAllocationBuffer(space, stats_collector_,
                                 new_page->PayloadStart(),
                                 new_page->PayloadSize());
+  return true;
 }
 
-bool ObjectAllocator::RefillLinearAllocationBufferFromFreeList(
+bool ObjectAllocator::TryRefillLinearAllocationBufferFromFreeList(
     NormalPageSpace& space, size_t size) {
   const FreeList::Block entry = space.free_list().Allocate(size);
   if (!entry.address) return false;
@@ -34,14 +34,14 @@ namespace internal {
 
 class StatsCollector;
 class PageBackend;
+class GarbageCollector;
 
 class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
  public:
   static constexpr size_t kSmallestSpaceSize = 32;
 
-  ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
-                  StatsCollector& stats_collector,
-                  PreFinalizerHandler& prefinalizer_handler);
+  ObjectAllocator(RawHeap&, PageBackend&, StatsCollector&, PreFinalizerHandler&,
+                  FatalOutOfMemoryHandler&, GarbageCollector&);
 
   inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
   inline void* AllocateObject(size_t size, AlignVal alignment,
@@ -71,13 +71,15 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
   void* OutOfLineAllocate(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
   void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, AlignVal, GCInfoIndex);
 
-  void RefillLinearAllocationBuffer(NormalPageSpace&, size_t);
-  bool RefillLinearAllocationBufferFromFreeList(NormalPageSpace&, size_t);
+  bool TryRefillLinearAllocationBuffer(NormalPageSpace&, size_t);
+  bool TryRefillLinearAllocationBufferFromFreeList(NormalPageSpace&, size_t);
 
   RawHeap& raw_heap_;
   PageBackend& page_backend_;
   StatsCollector& stats_collector_;
   PreFinalizerHandler& prefinalizer_handler_;
+  FatalOutOfMemoryHandler& oom_handler_;
+  GarbageCollector& garbage_collector_;
 };
 
 void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
@@ -4,6 +4,8 @@
 
 #include "src/heap/cppgc/page-memory.h"
 
+#include <cstddef>
+
 #include "src/base/macros.h"
 #include "src/base/sanitizer/asan.h"
 #include "src/heap/cppgc/platform.h"
@@ -13,50 +15,40 @@ namespace internal {
 
 namespace {
 
-void Unprotect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
-               const PageMemory& page_memory) {
+V8_WARN_UNUSED_RESULT bool TryUnprotect(PageAllocator& allocator,
+                                        const PageMemory& page_memory) {
   if (SupportsCommittingGuardPages(allocator)) {
-    if (!allocator.SetPermissions(page_memory.writeable_region().base(),
-                                  page_memory.writeable_region().size(),
-                                  PageAllocator::Permission::kReadWrite)) {
-      oom_handler("Oilpan: Unprotecting memory.");
-    }
-  } else {
-    // No protection in case the allocator cannot commit at the required
-    // granularity. Only protect if the allocator supports committing at that
-    // granularity.
-    //
-    // The allocator needs to support committing the overall range.
-    CHECK_EQ(0u,
-             page_memory.overall_region().size() % allocator.CommitPageSize());
-    if (!allocator.SetPermissions(page_memory.overall_region().base(),
-                                  page_memory.overall_region().size(),
-                                  PageAllocator::Permission::kReadWrite)) {
-      oom_handler("Oilpan: Unprotecting memory.");
-    }
+    return allocator.SetPermissions(page_memory.writeable_region().base(),
+                                    page_memory.writeable_region().size(),
+                                    PageAllocator::Permission::kReadWrite);
   }
+  // No protection using guard pages in case the allocator cannot commit at
+  // the required granularity. Only protect if the allocator supports
+  // committing at that granularity.
+  //
+  // The allocator needs to support committing the overall range.
+  CHECK_EQ(0u,
+           page_memory.overall_region().size() % allocator.CommitPageSize());
+  return allocator.SetPermissions(page_memory.overall_region().base(),
+                                  page_memory.overall_region().size(),
+                                  PageAllocator::Permission::kReadWrite);
 }
 
-void Protect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
-             const PageMemory& page_memory) {
+V8_WARN_UNUSED_RESULT bool TryProtect(PageAllocator& allocator,
                                       const PageMemory& page_memory) {
   if (SupportsCommittingGuardPages(allocator)) {
     // Swap the same region, providing the OS with a chance for fast lookup and
     // change.
-    if (!allocator.SetPermissions(page_memory.writeable_region().base(),
-                                  page_memory.writeable_region().size(),
-                                  PageAllocator::Permission::kNoAccess)) {
-      oom_handler("Oilpan: Protecting memory.");
-    }
-  } else {
-    // See Unprotect().
-    CHECK_EQ(0u,
-             page_memory.overall_region().size() % allocator.CommitPageSize());
-    if (!allocator.SetPermissions(page_memory.overall_region().base(),
-                                  page_memory.overall_region().size(),
-                                  PageAllocator::Permission::kNoAccess)) {
-      oom_handler("Oilpan: Protecting memory.");
-    }
+    return allocator.SetPermissions(page_memory.writeable_region().base(),
+                                    page_memory.writeable_region().size(),
+                                    PageAllocator::Permission::kNoAccess);
   }
+  // See Unprotect().
+  CHECK_EQ(0u,
+           page_memory.overall_region().size() % allocator.CommitPageSize());
+  return allocator.SetPermissions(page_memory.overall_region().base(),
+                                  page_memory.overall_region().size(),
+                                  PageAllocator::Permission::kNoAccess);
 }
 
 MemoryRegion ReserveMemoryRegion(PageAllocator& allocator,
@@ -84,10 +76,8 @@ void FreeMemoryRegion(PageAllocator& allocator,
 }  // namespace
 
 PageMemoryRegion::PageMemoryRegion(PageAllocator& allocator,
-                                   FatalOutOfMemoryHandler& oom_handler,
                                    MemoryRegion reserved_region, bool is_large)
     : allocator_(allocator),
-      oom_handler_(oom_handler),
       reserved_region_(reserved_region),
       is_large_(is_large) {}
 
@@ -101,7 +91,7 @@ constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
 NormalPageMemoryRegion::NormalPageMemoryRegion(
     PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler)
     : PageMemoryRegion(
-          allocator, oom_handler,
+          allocator,
           ReserveMemoryRegion(allocator, oom_handler,
                               RoundUp(kPageSize * kNumPageRegions,
                                       allocator.AllocatePageSize())),
@@ -115,21 +105,24 @@ NormalPageMemoryRegion::NormalPageMemoryRegion(
 
 NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
 
-void NormalPageMemoryRegion::Allocate(Address writeable_base) {
+bool NormalPageMemoryRegion::TryAllocate(Address writeable_base) {
   const size_t index = GetIndex(writeable_base);
-  ChangeUsed(index, true);
-  Unprotect(allocator_, oom_handler_, GetPageMemory(index));
+  if (TryUnprotect(allocator_, GetPageMemory(index))) {
+    ChangeUsed(index, true);
+    return true;
+  }
+  return false;
 }
 
 void NormalPageMemoryRegion::Free(Address writeable_base) {
   const size_t index = GetIndex(writeable_base);
   ChangeUsed(index, false);
-  Protect(allocator_, oom_handler_, GetPageMemory(index));
+  CHECK(TryProtect(allocator_, GetPageMemory(index)));
 }
 
 void NormalPageMemoryRegion::UnprotectForTesting() {
   for (size_t i = 0; i < kNumPageRegions; ++i) {
-    Unprotect(allocator_, oom_handler_, GetPageMemory(i));
+    CHECK(TryUnprotect(allocator_, GetPageMemory(i)));
   }
 }
 
@@ -137,7 +130,7 @@ LargePageMemoryRegion::LargePageMemoryRegion(
     PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
     size_t length)
     : PageMemoryRegion(
-          allocator, oom_handler,
+          allocator,
          ReserveMemoryRegion(allocator, oom_handler,
                              RoundUp(length + 2 * kGuardPageSize,
                                      allocator.AllocatePageSize())),
@@ -146,7 +139,7 @@ LargePageMemoryRegion::LargePageMemoryRegion(
 LargePageMemoryRegion::~LargePageMemoryRegion() = default;
 
 void LargePageMemoryRegion::UnprotectForTesting() {
-  Unprotect(allocator_, oom_handler_, GetPageMemory());
+  CHECK(TryUnprotect(allocator_, GetPageMemory()));
 }
 
 PageMemoryRegionTree::PageMemoryRegionTree() = default;
@@ -192,7 +185,7 @@ PageBackend::PageBackend(PageAllocator& normal_page_allocator,
 
 PageBackend::~PageBackend() = default;
 
-Address PageBackend::AllocateNormalPageMemory() {
+Address PageBackend::TryAllocateNormalPageMemory() {
   v8::base::MutexGuard guard(&mutex_);
   std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take();
   if (!result.first) {
@@ -207,8 +200,11 @@ Address PageBackend::AllocateNormalPageMemory() {
     result = page_pool_.Take();
     DCHECK(result.first);
   }
-  result.first->Allocate(result.second);
-  return result.second;
+  if (V8_LIKELY(result.first->TryAllocate(result.second))) {
+    return result.second;
+  }
+  page_pool_.Add(result.first, result.second);
+  return nullptr;
 }
 
 void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
@@ -219,15 +215,18 @@ void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
   page_pool_.Add(pmr, writeable_base);
 }
 
-Address PageBackend::AllocateLargePageMemory(size_t size) {
+Address PageBackend::TryAllocateLargePageMemory(size_t size) {
   v8::base::MutexGuard guard(&mutex_);
   auto pmr = std::make_unique<LargePageMemoryRegion>(large_page_allocator_,
                                                      oom_handler_, size);
   const PageMemory pm = pmr->GetPageMemory();
-  Unprotect(large_page_allocator_, oom_handler_, pm);
-  page_memory_region_tree_.Add(pmr.get());
-  large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
-  return pm.writeable_region().base();
+  if (V8_LIKELY(TryUnprotect(large_page_allocator_, pm))) {
+    page_memory_region_tree_.Add(pmr.get());
+    large_page_memory_regions_.insert(
+        std::make_pair(pmr.get(), std::move(pmr)));
+    return pm.writeable_region().base();
+  }
+  return nullptr;
 }
 
 void PageBackend::FreeLargePageMemory(Address writeable_base) {
@@ -82,11 +82,9 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
   virtual void UnprotectForTesting() = 0;
 
  protected:
-  PageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, MemoryRegion,
-                   bool);
+  PageMemoryRegion(PageAllocator&, MemoryRegion, bool);
 
   PageAllocator& allocator_;
-  FatalOutOfMemoryHandler& oom_handler_;
   const MemoryRegion reserved_region_;
   const bool is_large_;
 };
@@ -110,7 +108,8 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
 
   // Allocates a normal page at |writeable_base| address. Changes page
   // protection.
-  void Allocate(Address writeable_base);
+  // Returns true when the allocation was successful and false otherwise.
+  V8_WARN_UNUSED_RESULT bool TryAllocate(Address writeable_base);
 
   // Frees a normal page at at |writeable_base| address. Changes page
   // protection.
@@ -203,7 +202,7 @@ class V8_EXPORT_PRIVATE PageBackend final {
   // Allocates a normal page from the backend.
   //
   // Returns the writeable base of the region.
-  Address AllocateNormalPageMemory();
+  Address TryAllocateNormalPageMemory();
 
   // Returns normal page memory back to the backend. Expects the
   // |writeable_base| returned by |AllocateNormalMemory()|.
@@ -212,7 +211,7 @@ class V8_EXPORT_PRIVATE PageBackend final {
   // Allocates a large page from the backend.
   //
   // Returns the writeable base of the region.
-  Address AllocateLargePageMemory(size_t size);
+  Address TryAllocateLargePageMemory(size_t size);
 
   // Returns large page memory back to the backend. Expects the |writeable_base|
   // returned by |AllocateLargePageMemory()|.
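The page-memory changes above all follow one pattern: helpers that used to call the fatal OOM handler themselves (Unprotect, Protect, Allocate*PageMemory) now return a success flag or nullptr and are annotated V8_WARN_UNUSED_RESULT, so each caller decides whether a failure is recoverable (propagate nullptr back to the allocator, which can GC and retry) or fatal (CHECK). A hedged sketch of that split, with hypothetical names:

// Sketch of the "Try* helper + caller decides" split used in page-memory.cc.
// TryChangePermissions, TryAllocatePage, and FreePage are hypothetical names.
#include <cstddef>
#include <cstdlib>

// Stand-in for PageAllocator::SetPermissions(); may legitimately fail, e.g.
// when the OS refuses to commit memory.
[[nodiscard]] bool TryChangePermissions(void* base, std::size_t size) {
  return base != nullptr && size != 0;
}

// Allocation path: failure is recoverable. Return nullptr so the caller
// (ultimately the object allocator) can trigger a GC and retry.
void* TryAllocatePage(void* reserved, std::size_t size) {
  if (!TryChangePermissions(reserved, size)) return nullptr;
  return reserved;
}

// Teardown path: a failure here is a bug, so fail hard, mirroring
// CHECK(TryProtect(...)) in the diff above.
void FreePage(void* base, std::size_t size) {
  if (!TryChangePermissions(base, size)) std::abort();
}

int main() {
  alignas(4096) static unsigned char reserved[4096];
  if (void* page = TryAllocatePage(reserved, sizeof(reserved))) {
    FreePage(page, sizeof(reserved));
  }
}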
@@ -4284,8 +4284,9 @@ const char* Heap::GarbageCollectionReasonToString(
       return "background allocation failure";
     case GarbageCollectionReason::kFinalizeMinorMC:
       return "finalize MinorMC";
+    case GarbageCollectionReason::kCppHeapAllocationFailure:
+      return "CppHeap allocation failure";
   }
   UNREACHABLE();
 }
 
 bool Heap::Contains(HeapObject value) const {
@@ -178,8 +178,9 @@ enum class GarbageCollectionReason : int {
   kMeasureMemory = 24,
   kBackgroundAllocationFailure = 25,
   kFinalizeMinorMC = 26,
+  kCppHeapAllocationFailure = 27,
 
-  kLastReason = kBackgroundAllocationFailure,
+  kLastReason = kCppHeapAllocationFailure,
 };
 
 static_assert(kGarbageCollectionReasonMaxValue ==
@@ -188,7 +188,8 @@ TEST_F(PageTest, NormalPageCreationDestruction) {
   const PageBackend* backend = Heap::From(GetHeap())->page_backend();
   auto* space = static_cast<NormalPageSpace*>(
       heap.Space(RawHeap::RegularSpaceType::kNormal1));
-  auto* page = NormalPage::Create(GetPageBackend(), *space);
+  auto* page = NormalPage::TryCreate(GetPageBackend(), *space);
+  EXPECT_NE(nullptr, page);
   EXPECT_NE(nullptr, backend->Lookup(page->PayloadStart()));
 
   space->AddPage(page);
@@ -213,7 +214,8 @@ TEST_F(PageTest, LargePageCreationDestruction) {
   const PageBackend* backend = Heap::From(GetHeap())->page_backend();
   auto* space = static_cast<LargePageSpace*>(
       heap.Space(RawHeap::RegularSpaceType::kLarge));
-  auto* page = LargePage::Create(GetPageBackend(), *space, kObjectSize);
+  auto* page = LargePage::TryCreate(GetPageBackend(), *space, kObjectSize);
+  EXPECT_NE(nullptr, page);
   EXPECT_NE(nullptr, backend->Lookup(page->PayloadStart()));
 
   space->AddPage(page);
@@ -231,15 +233,17 @@ TEST_F(PageTest, UnsweptPageDestruction) {
   {
     auto* space = static_cast<NormalPageSpace*>(
         heap.Space(RawHeap::RegularSpaceType::kNormal1));
-    auto* page = NormalPage::Create(GetPageBackend(), *space);
+    auto* page = NormalPage::TryCreate(GetPageBackend(), *space);
+    EXPECT_NE(nullptr, page);
     space->AddPage(page);
     EXPECT_DEATH_IF_SUPPORTED(NormalPage::Destroy(page), "");
   }
   {
     auto* space = static_cast<LargePageSpace*>(
         heap.Space(RawHeap::RegularSpaceType::kLarge));
-    auto* page = LargePage::Create(GetPageBackend(), *space,
-                                   2 * kLargeObjectSizeThreshold);
+    auto* page = LargePage::TryCreate(GetPageBackend(), *space,
+                                      2 * kLargeObjectSizeThreshold);
+    EXPECT_NE(nullptr, page);
     space->AddPage(page);
     EXPECT_DEATH_IF_SUPPORTED(LargePage::Destroy(page), "");
     // Detach page and really destroy page in the parent process so that sweeper
@@ -253,10 +253,10 @@ TEST(PageBackendTest, AllocateNormalUsesPool) {
   FatalOutOfMemoryHandler oom_handler;
   PageBackend backend(allocator, allocator, oom_handler);
   constexpr size_t kBucket = 0;
-  Address writeable_base1 = backend.AllocateNormalPageMemory();
+  Address writeable_base1 = backend.TryAllocateNormalPageMemory();
   EXPECT_NE(nullptr, writeable_base1);
   backend.FreeNormalPageMemory(kBucket, writeable_base1);
-  Address writeable_base2 = backend.AllocateNormalPageMemory();
+  Address writeable_base2 = backend.TryAllocateNormalPageMemory();
   EXPECT_NE(nullptr, writeable_base2);
   EXPECT_EQ(writeable_base1, writeable_base2);
 }
@@ -265,9 +265,9 @@ TEST(PageBackendTest, AllocateLarge) {
   v8::base::PageAllocator allocator;
   FatalOutOfMemoryHandler oom_handler;
   PageBackend backend(allocator, allocator, oom_handler);
-  Address writeable_base1 = backend.AllocateLargePageMemory(13731);
+  Address writeable_base1 = backend.TryAllocateLargePageMemory(13731);
   EXPECT_NE(nullptr, writeable_base1);
-  Address writeable_base2 = backend.AllocateLargePageMemory(9478);
+  Address writeable_base2 = backend.TryAllocateLargePageMemory(9478);
   EXPECT_NE(nullptr, writeable_base2);
   EXPECT_NE(writeable_base1, writeable_base2);
   backend.FreeLargePageMemory(writeable_base1);
@@ -278,7 +278,7 @@ TEST(PageBackendTest, LookupNormal) {
   v8::base::PageAllocator allocator;
   FatalOutOfMemoryHandler oom_handler;
   PageBackend backend(allocator, allocator, oom_handler);
-  Address writeable_base = backend.AllocateNormalPageMemory();
+  Address writeable_base = backend.TryAllocateNormalPageMemory();
   if (kGuardPageSize) {
     EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
   }
@@ -299,7 +299,7 @@ TEST(PageBackendTest, LookupLarge) {
   FatalOutOfMemoryHandler oom_handler;
   PageBackend backend(allocator, allocator, oom_handler);
   constexpr size_t kSize = 7934;
-  Address writeable_base = backend.AllocateLargePageMemory(kSize);
+  Address writeable_base = backend.TryAllocateLargePageMemory(kSize);
   if (kGuardPageSize) {
     EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
   }
@@ -314,7 +314,7 @@ TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
   Address base;
   {
     PageBackend backend(allocator, allocator, oom_handler);
-    base = backend.AllocateNormalPageMemory();
+    base = backend.TryAllocateNormalPageMemory();
   }
   EXPECT_DEATH_IF_SUPPORTED(access(base[0]), "");
 }