[heap] Add page evacuation mode for new->old

In a full mark-compact GC, instead of copying live objects to old space one
by one for pages that have more than X% live bytes, we just move the whole
page over to old space.

X=70 (default value)
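
As a reading aid, here is a minimal standalone sketch of the qualification
check this CL introduces. The feature is switched by --page_promotion and
tuned by --page_promotion_threshold (both added in the flag hunk below); the
500 KB page payload used for the arithmetic is an assumption for
illustration, not a value taken from this CL.

#include <cstdint>

// Illustrative stand-ins for FLAG_page_promotion_threshold and
// NewSpacePage::kAllocatableMemory; the payload size is assumed.
constexpr int kPagePromotionThreshold = 70;         // percent (default)
constexpr int64_t kAllocatableMemory = 500 * 1024;  // assumed page payload

// A new-space page qualifies for fast evacuation when its live bytes exceed
// the threshold; with the values above: 70% of 512,000 bytes = 358,400 bytes.
bool QualifiesForFastEvacuation(int64_t live_bytes) {
  const int64_t threshold = kPagePromotionThreshold * kAllocatableMemory / 100;
  return live_bytes > threshold;
}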

BUG=chromium:581412
LOG=N

Review URL: https://codereview.chromium.org/1863983002

Cr-Commit-Position: refs/heads/master@{#35610}
Author: mlippautz
Date: 2016-04-19 03:07:46 -07:00
Committed by: Commit bot
Parent: d784c2d1ea
Commit: 0d7e23a6ed
9 changed files with 251 additions and 70 deletions

View File

@@ -253,6 +253,9 @@ DEFINE_BOOL(compiled_keyed_generic_loads, false,
"use optimizing compiler to generate keyed generic load stubs")
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
DEFINE_BOOL(page_promotion, true, "promote pages based on utilization")
DEFINE_INT(page_promotion_threshold, 70,
"min percentage of live bytes on a page to enable fast evacuation")
DEFINE_BOOL(trace_pretenuring, false,
"trace pretenuring decisions of HAllocate instructions")
DEFINE_BOOL(trace_pretenuring_statistics, false,

View File

@@ -6217,7 +6217,6 @@ void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
}
void Heap::ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
@@ -6252,7 +6251,6 @@ void Heap::ExternalStringTable::CleanUp() {
#endif
}
void Heap::ExternalStringTable::TearDown() {
for (int i = 0; i < new_space_strings_.length(); ++i) {
heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));

View File

@@ -1666,7 +1666,7 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
semispace_copied_size_(0),
local_pretenuring_feedback_(local_pretenuring_feedback) {}
bool Visit(HeapObject* object) override {
inline bool Visit(HeapObject* object) override {
heap_->UpdateAllocationSite<Heap::kCached>(object,
local_pretenuring_feedback_);
int size = object->Size();
@@ -1798,6 +1798,33 @@ class MarkCompactCollector::EvacuateNewSpaceVisitor final
HashMap* local_pretenuring_feedback_;
};
class MarkCompactCollector::EvacuateNewSpacePageVisitor final
: public MarkCompactCollector::HeapObjectVisitor {
public:
EvacuateNewSpacePageVisitor() : promoted_size_(0) {}
static void MoveToOldSpace(NewSpacePage* page, PagedSpace* owner) {
page->heap()->new_space()->ReplaceWithEmptyPage(page);
Page* new_page = Page::Convert(page, owner);
new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
}
inline bool Visit(HeapObject* object) {
if (V8_UNLIKELY(object->IsJSArrayBuffer())) {
object->GetHeap()->array_buffer_tracker()->Promote(
JSArrayBuffer::cast(object));
}
RecordMigratedSlotVisitor visitor;
object->IterateBodyFast(&visitor);
promoted_size_ += object->Size();
return true;
}
intptr_t promoted_size() { return promoted_size_; }
private:
intptr_t promoted_size_;
};
class MarkCompactCollector::EvacuateOldSpaceVisitor final
: public MarkCompactCollector::EvacuateVisitorBase {
@@ -1806,7 +1833,7 @@ class MarkCompactCollector::EvacuateOldSpaceVisitor final
CompactionSpaceCollection* compaction_spaces)
: EvacuateVisitorBase(heap, compaction_spaces) {}
bool Visit(HeapObject* object) override {
inline bool Visit(HeapObject* object) override {
CompactionSpace* target_space = compaction_spaces_->Get(
Page::FromAddress(object->address())->owner()->identity());
HeapObject* target_object = nullptr;
@@ -3021,9 +3048,17 @@ void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
newspace_evacuation_candidates_.Rewind(0);
}
class MarkCompactCollector::Evacuator : public Malloced {
public:
// NewSpacePages with more live bytes than this threshold qualify for fast
// evacuation.
static int PageEvacuationThreshold() {
if (FLAG_page_promotion)
return FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory /
100;
return NewSpacePage::kAllocatableMemory + kPointerSize;
}
explicit Evacuator(MarkCompactCollector* collector)
: collector_(collector),
compaction_spaces_(collector->heap()),
@@ -3031,6 +3066,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
kInitialLocalPretenuringFeedbackCapacity),
new_space_visitor_(collector->heap(), &compaction_spaces_,
&local_pretenuring_feedback_),
new_space_page_visitor(),
old_space_visitor_(collector->heap(), &compaction_spaces_),
duration_(0.0),
bytes_compacted_(0) {}
@@ -3044,17 +3080,32 @@
CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
private:
enum EvacuationMode {
kObjectsNewToOld,
kPageNewToOld,
kObjectsOldToOld,
};
static const int kInitialLocalPretenuringFeedbackCapacity = 256;
Heap* heap() { return collector_->heap(); }
inline Heap* heap() { return collector_->heap(); }
inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
// Note: The order of checks is important in this function.
if (chunk->InNewSpace()) return kObjectsNewToOld;
if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
return kPageNewToOld;
DCHECK(chunk->IsEvacuationCandidate());
return kObjectsOldToOld;
}
void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
duration_ += duration;
bytes_compacted_ += bytes_compacted;
}
template <IterationMode mode>
inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
template <IterationMode mode, class Visitor>
inline bool EvacuateSinglePage(MemoryChunk* p, Visitor* visitor);
MarkCompactCollector* collector_;
@@ -3064,6 +3115,7 @@ class MarkCompactCollector::Evacuator : public Malloced {
// Visitors for the corresponding spaces.
EvacuateNewSpaceVisitor new_space_visitor_;
EvacuateNewSpacePageVisitor new_space_page_visitor;
EvacuateOldSpaceVisitor old_space_visitor_;
// Bookkeeping info.
@@ -3071,17 +3123,18 @@
intptr_t bytes_compacted_;
};
template <MarkCompactCollector::IterationMode mode>
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
MemoryChunk* p, HeapObjectVisitor* visitor) {
template <MarkCompactCollector::IterationMode mode, class Visitor>
bool MarkCompactCollector::Evacuator::EvacuateSinglePage(MemoryChunk* p,
Visitor* visitor) {
bool success = false;
DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
DCHECK(p->IsEvacuationCandidate() || p->InNewSpace() ||
p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
int saved_live_bytes = p->LiveBytes();
double evacuation_time;
{
AlwaysAllocateScope always_allocate(heap()->isolate());
TimedScope timed_scope(&evacuation_time);
success = collector_->VisitLiveObjects(p, visitor, mode);
success = collector_->VisitLiveObjects<Visitor>(p, visitor, mode);
}
if (FLAG_trace_evacuation) {
const char age_mark_tag =
@@ -3093,8 +3146,9 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
: '#';
PrintIsolate(heap()->isolate(),
"evacuation[%p]: page=%p new_space=%d age_mark_tag=%c "
"executable=%d live_bytes=%d time=%f\n",
"page_evacuation=%d executable=%d live_bytes=%d time=%f\n",
this, p, p->InNewSpace(), age_mark_tag,
p->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION),
p->IsFlagSet(MemoryChunk::IS_EXECUTABLE), saved_live_bytes,
evacuation_time);
}
@@ -3105,30 +3159,38 @@ bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
}
bool MarkCompactCollector::Evacuator::EvacuatePage(MemoryChunk* chunk) {
bool success = false;
if (chunk->InNewSpace()) {
DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
NewSpacePage::kSweepingDone);
success = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
DCHECK(success);
USE(success);
} else {
DCHECK(chunk->IsEvacuationCandidate());
DCHECK_EQ(chunk->concurrent_sweeping_state().Value(), Page::kSweepingDone);
success = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
if (!success) {
// Aborted compaction page. We can record slots here to have them
// processed in parallel later on.
EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
success = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
DCHECK(success);
USE(success);
// We need to return failure here to indicate that we want this page added
// to the sweeper.
return false;
}
bool result = false;
DCHECK_EQ(chunk->concurrent_sweeping_state().Value(),
NewSpacePage::kSweepingDone);
switch (ComputeEvacuationMode(chunk)) {
case kObjectsNewToOld:
result = EvacuateSinglePage<kClearMarkbits>(chunk, &new_space_visitor_);
DCHECK(result);
USE(result);
break;
case kPageNewToOld:
result = EvacuateSinglePage<kKeepMarking>(chunk, &new_space_page_visitor);
DCHECK(result);
USE(result);
break;
case kObjectsOldToOld:
result = EvacuateSinglePage<kClearMarkbits>(chunk, &old_space_visitor_);
if (!result) {
// Aborted compaction page. We can record slots here to have them
// processed in parallel later on.
EvacuateRecordOnlyVisitor record_visitor(chunk->owner()->identity());
result = EvacuateSinglePage<kKeepMarking>(chunk, &record_visitor);
DCHECK(result);
USE(result);
// We need to return failure here to indicate that we want this page
// added to the sweeper.
return false;
}
break;
default:
UNREACHABLE();
}
return success;
return result;
}
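
One detail worth calling out in the dispatch above: the object-moving cases
run with kClearMarkbits, while the new kPageNewToOld case runs with
kKeepMarking. A standalone restatement of that choice follows; the enums
mirror the diff, the mapping function itself is illustrative.

// Object-moving modes clear mark bits because the objects leave their page
// and the old marking information becomes stale. A promoted page keeps its
// mark bits because the page itself survives and is later handed to the
// sweeper (see AddLatePage below), which relies on the marks to tell live
// objects from free space.
enum EvacuationMode { kObjectsNewToOld, kPageNewToOld, kObjectsOldToOld };
enum IterationMode { kClearMarkbits, kKeepMarking };

IterationMode IterationModeFor(EvacuationMode mode) {
  return mode == kPageNewToOld ? kKeepMarking : kClearMarkbits;
}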
void MarkCompactCollector::Evacuator::Finalize() {
@@ -3136,12 +3198,14 @@ void MarkCompactCollector::Evacuator::Finalize() {
heap()->code_space()->MergeCompactionSpace(
compaction_spaces_.Get(CODE_SPACE));
heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
new_space_page_visitor.promoted_size());
heap()->IncrementSemiSpaceCopiedObjectSize(
new_space_visitor_.semispace_copied_size());
heap()->IncrementYoungSurvivorsCounter(
new_space_visitor_.promoted_size() +
new_space_visitor_.semispace_copied_size());
new_space_visitor_.semispace_copied_size() +
new_space_page_visitor.promoted_size());
heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
@@ -3191,6 +3255,14 @@ class EvacuationJobTraits {
bool success, PerPageData data) {
if (chunk->InNewSpace()) {
DCHECK(success);
} else if (chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
DCHECK(success);
Page* p = static_cast<Page*>(chunk);
p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
p->ForAllFreeListCategories(
[](FreeListCategory* category) { DCHECK(!category->is_linked()); });
heap->mark_compact_collector()->sweeper().AddLatePage(
p->owner()->identity(), p);
} else {
Page* p = static_cast<Page*>(chunk);
if (success) {
@@ -3220,8 +3292,15 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
live_bytes += page->LiveBytes();
job.AddPage(page, &abandoned_pages);
}
const Address age_mark = heap()->new_space()->age_mark();
for (NewSpacePage* page : newspace_evacuation_candidates_) {
live_bytes += page->LiveBytes();
if (!page->NeverEvacuate() &&
(page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
!page->Contains(age_mark)) {
EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
}
job.AddPage(page, &abandoned_pages);
}
DCHECK_GE(job.NumberOfPages(), 1);
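
As a reading aid, a standalone restatement of the promotion predicate in the
loop above; the plain parameters stand in for the corresponding NewSpacePage
queries (NeverEvacuate, LiveBytes, the NEW_SPACE_BELOW_AGE_MARK flag, and
Contains(age_mark)).

#include <cstdint>

// A page is moved to old space wholesale only if it may be evacuated at all,
// carries more live bytes than the threshold, and lies entirely below the
// age mark. A page that contains the age mark mixes objects that have and
// have not yet survived a scavenge, so it is not promoted as a whole.
bool ShouldPromoteWholePage(bool never_evacuate, int64_t live_bytes,
                            int64_t threshold, bool below_age_mark,
                            bool contains_age_mark) {
  return !never_evacuate && live_bytes > threshold && below_age_mark &&
         !contains_age_mark;
}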
@@ -3381,9 +3460,8 @@ static void VerifyAllBlackObjects(MemoryChunk* page) {
}
#endif // VERIFY_HEAP
bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page,
HeapObjectVisitor* visitor,
template <class Visitor>
bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
IterationMode mode) {
#ifdef VERIFY_HEAP
VerifyAllBlackObjects(page);
@@ -3544,12 +3622,8 @@ class PointerUpdateJobTraits {
static void UpdateOldToNewSlot(HeapObject** address, HeapObject* object) {
MapWord map_word = object->map_word();
// Since we only filter invalid slots in old space, the store buffer can
// still contain stale pointers in large object and in map spaces. Ignore
// these pointers here.
DCHECK(map_word.IsForwardingAddress() ||
!object->GetHeap()->old_space()->Contains(
reinterpret_cast<Address>(address)));
// There could still be stale pointers in large object space, map space,
// and old space for pages that have been promoted.
if (map_word.IsForwardingAddress()) {
// Update the corresponding slot.
*address = map_word.ToForwardingAddress();

View File

@@ -628,14 +628,13 @@ class MarkCompactCollector {
void RegisterExternallyReferencedObject(Object** object);
private:
class EvacuateNewSpacePageVisitor;
class EvacuateNewSpaceVisitor;
class EvacuateOldSpaceVisitor;
class EvacuateRecordOnlyVisitor;
class EvacuateVisitorBase;
class HeapObjectVisitor;
typedef std::vector<Page*> SweepingList;
explicit MarkCompactCollector(Heap* heap);
bool WillBeDeoptimized(Code* code);
@@ -828,7 +827,8 @@ class MarkCompactCollector {
// Iterates through all live objects on a page using marking information.
// Returns whether all objects have successfully been visited.
bool VisitLiveObjects(MemoryChunk* page, HeapObjectVisitor* visitor,
template <class Visitor>
bool VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
IterationMode mode);
void VisitLiveObjectsBody(Page* page, ObjectVisitor* visitor);

View File

@@ -287,6 +287,7 @@ NewSpacePage* NewSpacePage::Initialize(Heap* heap, MemoryChunk* chunk,
// --------------------------------------------------------------------------
// PagedSpace
template <Page::InitializationMode mode>
Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
PagedSpace* owner) {
Page* page = reinterpret_cast<Page*>(chunk);
@@ -299,11 +300,25 @@ Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
// Make sure that categories are initialized before freeing the area.
page->InitializeFreeListCategories();
owner->Free(page->area_start(), page->area_size());
// In the case we do not free the memory, we effectively account for the whole
// page as allocated memory that cannot be used for further allocations.
if (mode == kFreeMemory) {
owner->Free(page->area_start(), page->area_size());
}
return page;
}
Page* Page::Convert(NewSpacePage* old_page, PagedSpace* new_owner) {
old_page->set_owner(new_owner);
old_page->SetFlags(0, ~0);
new_owner->AccountCommitted(old_page->size());
Page* new_page = Page::Initialize<kDoNotFreeMemory>(
old_page->heap(), old_page, NOT_EXECUTABLE, new_owner);
new_page->InsertAfter(new_owner->anchor()->prev_page());
return new_page;
}
void Page::InitializeFreeListCategories() {
for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
categories_[i].Initialize(static_cast<FreeListCategoryType>(i));

View File

@@ -1161,7 +1161,7 @@ bool PagedSpace::Expand() {
Page* p =
heap()->memory_allocator()->AllocatePage<Page>(size, this, executable());
if (p == NULL) return false;
if (p == nullptr) return false;
AccountCommitted(static_cast<intptr_t>(p->size()));
@@ -1817,6 +1817,19 @@ void SemiSpace::Reset() {
current_page_ = anchor_.next_page();
}
void SemiSpace::ReplaceWithEmptyPage(NewSpacePage* old_page) {
NewSpacePage* new_page =
heap()->memory_allocator()->AllocatePage<NewSpacePage>(
NewSpacePage::kAllocatableMemory, this, executable());
Bitmap::Clear(new_page);
new_page->SetFlags(old_page->GetFlags(), NewSpacePage::kCopyAllFlags);
new_page->set_next_page(old_page->next_page());
new_page->set_prev_page(old_page->prev_page());
old_page->next_page()->set_prev_page(new_page);
old_page->prev_page()->set_next_page(new_page);
heap()->CreateFillerObjectAt(new_page->area_start(), new_page->area_size(),
ClearRecordedSlots::kNo);
}
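
The body above is a textbook doubly-linked-list node replacement, followed by
a CreateFillerObjectAt call that keeps the fresh page iterable by covering
its payload with a single filler object. The splice in isolation looks like
this (generic sketch, names illustrative):

// Rewire the neighbors of old_node so that new_node takes its place in the
// list, exactly as ReplaceWithEmptyPage does for the semispace's page list.
struct PageNode {
  PageNode* prev = nullptr;
  PageNode* next = nullptr;
};

void ReplaceNode(PageNode* old_node, PageNode* new_node) {
  new_node->next = old_node->next;
  new_node->prev = old_node->prev;
  old_node->next->prev = new_node;
  old_node->prev->next = new_node;
}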
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
// We won't be swapping semispaces without data in them.

View File

@@ -419,6 +419,10 @@ class MemoryChunk {
// to grey transition is performed in the value.
HAS_PROGRESS_BAR,
// |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
// from new to old space during evacuation.
PAGE_NEW_OLD_PROMOTION,
// A black page has all mark bits set to 1 (black). A black page currently
// cannot be iterated because it is not swept. Moreover live bytes are also
// not updated.
@@ -824,6 +828,8 @@ class MemoryChunk {
// Page* p = Page::FromAllocationTop(top);
class Page : public MemoryChunk {
public:
static inline Page* Convert(NewSpacePage* old_page, PagedSpace* new_owner);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[
// This only works if the object is in fact in a page. See also MemoryChunk::
@@ -938,6 +944,9 @@ class Page : public MemoryChunk {
inline void ClearEvacuationCandidate();
private:
enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
template <InitializationMode mode = kFreeMemory>
static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
Executability executable, PagedSpace* owner);
@@ -1041,11 +1050,6 @@ class Space : public Malloced {
}
}
#ifdef DEBUG
virtual void Print() = 0;
#endif
protected:
void AccountCommitted(intptr_t bytes) {
DCHECK_GE(bytes, 0);
committed_ += bytes;
@@ -1060,6 +1064,11 @@
DCHECK_GE(committed_, 0);
}
#ifdef DEBUG
virtual void Print() = 0;
#endif
protected:
v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
bool allocation_observers_paused_;
@@ -2355,6 +2364,8 @@ class NewSpacePage : public MemoryChunk {
(1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
(1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
static const intptr_t kCopyAllFlags = ~0;
// Create a NewSpacePage object that is only used as anchor
// for the doubly-linked list of real pages.
explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
@@ -2440,6 +2451,8 @@ class SemiSpace : public Space {
// Resets the space to using the first page.
void Reset();
void ReplaceWithEmptyPage(NewSpacePage* page);
// Age mark accessors.
Address age_mark() { return age_mark_; }
void set_age_mark(Address mark);
@@ -2660,6 +2673,12 @@ class NewSpace : public Space {
inline size_t AllocatedSinceLastGC();
void ReplaceWithEmptyPage(NewSpacePage* page) {
// This method is called after flipping the semispace.
DCHECK(page->InFromSpace());
from_space_.ReplaceWithEmptyPage(page);
}
// Return the maximum capacity of a semispace.
int MaximumCapacity() {
DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());

View File

@@ -3524,6 +3524,8 @@ TEST(ReleaseOverReservedPages) {
// Concurrent sweeping adds non determinism, depending on when memory is
// available for further reuse.
i::FLAG_concurrent_sweeping = false;
// Fast evacuation of pages may result in a different page count in old space.
i::FLAG_page_promotion = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
@@ -6573,5 +6575,51 @@ HEAP_TEST(Regress589413) {
heap->CollectGarbage(OLD_SPACE);
}
UNINITIALIZED_TEST(PagePromotion) {
FLAG_page_promotion = true;
FLAG_page_promotion_threshold = 50; // %
i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
// We cannot optimize for size as we require a new space with more than one
// page.
i::FLAG_optimize_for_size = false;
// Set max_semi_space_size because it could've been initialized by an
// implication of optimize_for_size.
i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
v8::Isolate::CreateParams create_params;
create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
v8::Isolate* isolate = v8::Isolate::New(create_params);
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
{
v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Context::New(isolate)->Enter();
Heap* heap = i_isolate->heap();
std::vector<Handle<FixedArray>> handles;
SimulateFullSpace(heap->new_space(), &handles);
heap->CollectGarbage(NEW_SPACE);
CHECK_GT(handles.size(), 0u);
// The first object in handles should be on the first page.
Handle<FixedArray> first_object = handles.front();
NewSpacePage* first_page =
NewSpacePage::FromAddress(first_object->address());
// The age mark should not be on the first page.
CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
// To perform a sanity check on live bytes we need to mark the heap.
SimulateIncrementalMarking(heap, true);
// Sanity check that the page meets the requirements for promotion.
const int threshold_bytes =
FLAG_page_promotion_threshold * NewSpacePage::kAllocatableMemory / 100;
CHECK_GE(first_page->LiveBytes(), threshold_bytes);
// Actual checks: The page is in new space first, but is moved to old space
// during a full GC.
CHECK(heap->new_space()->ContainsSlow(first_page->address()));
CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
heap->CollectGarbage(OLD_SPACE);
CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
CHECK(heap->old_space()->ContainsSlow(first_page->address()));
}
}
} // namespace internal
} // namespace v8

View File

@@ -63,37 +63,48 @@ static inline std::vector<Handle<FixedArray>> CreatePadding(
// Helper function that simulates a full new-space in the heap.
static inline bool FillUpOnePage(v8::internal::NewSpace* space) {
static inline bool FillUpOnePage(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
space->DisableInlineAllocationSteps();
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
if (space_remaining == 0) return false;
CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
std::vector<Handle<FixedArray>> handles =
CreatePadding(space->heap(), space_remaining, i::NOT_TENURED);
if (out_handles != nullptr)
out_handles->insert(out_handles->end(), handles.begin(), handles.end());
return true;
}
// Helper function that allocates in new space until only extra_bytes remain
// free on the current page.
static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
int extra_bytes) {
static inline void AllocateAllButNBytes(
v8::internal::NewSpace* space, int extra_bytes,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
space->DisableInlineAllocationSteps();
int space_remaining = static_cast<int>(*space->allocation_limit_address() -
*space->allocation_top_address());
CHECK(space_remaining >= extra_bytes);
int new_linear_size = space_remaining - extra_bytes;
if (new_linear_size == 0) return;
CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
std::vector<Handle<FixedArray>> handles =
CreatePadding(space->heap(), new_linear_size, i::NOT_TENURED);
if (out_handles != nullptr)
out_handles->insert(out_handles->end(), handles.begin(), handles.end());
}
static inline void FillCurrentPage(v8::internal::NewSpace* space) {
AllocateAllButNBytes(space, 0);
static inline void FillCurrentPage(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
AllocateAllButNBytes(space, 0, out_handles);
}
static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
FillCurrentPage(space);
while (FillUpOnePage(space)) {
static inline void SimulateFullSpace(
v8::internal::NewSpace* space,
std::vector<Handle<FixedArray>>* out_handles = nullptr) {
FillCurrentPage(space, out_handles);
while (FillUpOnePage(space, out_handles) || space->AddFreshPage()) {
}
}