[heap] Cleanup: Remove WAS_SWEPT flag.
- Completely rely on the concurrent sweeping state for SweepingCompleted()
- Rename the state accordingly

CQ_EXTRA_TRYBOTS=tryserver.v8:v8_linux_arm64_gc_stress_dbg,v8_linux_gc_stress_dbg,v8_mac_gc_stress_dbg,v8_linux64_asan_rel,v8_linux64_tsan_rel,v8_mac64_asan_rel
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1614953002

Cr-Commit-Position: refs/heads/master@{#33490}
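The net effect on the page life cycle, condensed from the hunks below (a summary sketch assembled from this diff, not verbatim code from the tree): the WAS_SWEPT page flag and the four-value ParallelSweepingState are folded into a single three-value ConcurrentSweepingState, and the two old predicates WasSwept() and SweepingCompleted() collapse into one:

    // Single source of truth for a page's sweeping status (see the
    // spaces.h hunks below).
    enum ConcurrentSweepingState {
      kSweepingDone,        // Sweeping finished, or must not run on this page.
      kSweepingPending,     // Page is queued for concurrent sweeping.
      kSweepingInProgress,  // A sweeper thread currently owns the page.
    };

    // Replaces both Page::WasSwept() and MemoryChunk::SweepingCompleted().
    bool Page::SweepingDone() {
      return concurrent_sweeping_state().Value() == kSweepingDone;
    }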
commit 5eff542054
parent 9602f4b2d2
src/heap/heap.cc
@@ -3100,7 +3100,7 @@ bool Heap::CanMoveObjectStart(HeapObject* object) {
   // (3) the page was already concurrently swept. This case is an optimization
   // for concurrent sweeping. The WasSwept predicate for concurrently swept
   // pages is set after sweeping all pages.
-  return !InOldSpace(address) || page->WasSwept() || page->SweepingCompleted();
+  return !InOldSpace(address) || page->SweepingDone();
 }
src/heap/mark-compact.cc
@@ -540,9 +540,9 @@ void MarkCompactCollector::StartSweeperThreads() {

 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
   PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-  if (!page->SweepingCompleted()) {
+  if (!page->SweepingDone()) {
     SweepInParallel(page, owner);
-    if (!page->SweepingCompleted()) {
+    if (!page->SweepingDone()) {
       // We were not able to sweep that page, i.e., a concurrent
       // sweeper thread currently owns this page. Wait for the sweeper
       // thread to be done with this page.
@@ -721,14 +721,14 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
       continue;
     }
     // Invariant: Evacuation candidates are just created when marking is
-    // started. At the end of a GC all evacuation candidates are cleared and
-    // their slot buffers are released.
+    // started. This means that sweeping has finished. Furthermore, at the end
+    // of a GC all evacuation candidates are cleared and their slot buffers are
+    // released.
     CHECK(!p->IsEvacuationCandidate());
-    CHECK(p->slots_buffer() == NULL);
+    CHECK(p->slots_buffer() == nullptr);
+    CHECK(p->SweepingDone());
     DCHECK(p->area_size() == area_size);
-    int live_bytes =
-        p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
-    pages.push_back(std::make_pair(live_bytes, p));
+    pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
   }

   int candidate_count = 0;
@@ -3224,7 +3224,7 @@ void MarkCompactCollector::EvacuatePagesInParallel() {
         break;
       case MemoryChunk::kCompactingFinalize:
         DCHECK(p->IsEvacuationCandidate());
-        p->SetWasSwept();
+        DCHECK(p->SweepingDone());
         p->Unlink();
         break;
       case MemoryChunk::kCompactingDone:
@@ -3290,8 +3290,7 @@ void MarkCompactCollector::EvacuatePages(
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
-           MemoryChunk::kSweepingDone);
+    DCHECK(p->SweepingDone());
     if (p->parallel_compaction_state().TrySetValue(
             MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
       if (p->IsEvacuationCandidate()) {
@@ -3365,7 +3364,7 @@ template <SweepingMode sweeping_mode,
           FreeSpaceTreatmentMode free_space_mode>
 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
                  ObjectVisitor* v) {
-  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
+  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
   DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
             space->identity() == CODE_SPACE);
   DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
@@ -3428,14 +3427,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
     freed_bytes = Free<parallelism>(space, free_list, free_start, size);
     max_freed_bytes = Max(freed_bytes, max_freed_bytes);
   }

-  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    // When concurrent sweeping is active, the page will be marked after
-    // sweeping by the main thread.
-    p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
-  } else {
-    p->SetWasSwept();
-  }
+  p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
   return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
 }
@@ -3554,6 +3546,7 @@ void MarkCompactCollector::SweepAbortedPages() {
     Page* p = evacuation_candidates_[i];
     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
       p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
       switch (space->identity()) {
         case OLD_SPACE:
@@ -3716,6 +3709,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
       }
       PagedSpace* space = static_cast<PagedSpace*>(p->owner());
       p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);

       switch (space->identity()) {
         case OLD_SPACE:
@@ -3766,8 +3760,8 @@ void MarkCompactCollector::ReleaseEvacuationCandidates() {
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
     p->ResetLiveBytes();
-    CHECK(p->WasSwept());
-    space->ReleasePage(p);
+    CHECK(p->SweepingDone());
+    space->ReleasePage(p, true);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
@@ -3802,12 +3796,11 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
   int max_freed = 0;
   if (page->TryLock()) {
     // If this page was already swept in the meantime, we can return here.
-    if (page->parallel_sweeping_state().Value() !=
-        MemoryChunk::kSweepingPending) {
+    if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
       page->mutex()->Unlock();
       return 0;
     }
-    page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress);
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
     FreeList* free_list;
     FreeList private_free_list(space);
     if (space->identity() == OLD_SPACE) {
@@ -3827,6 +3820,7 @@ int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
     }
     free_list->Concatenate(&private_free_list);
+    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
     page->mutex()->Unlock();
   }
   return max_freed;
@@ -3843,10 +3837,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {

   while (it.has_next()) {
     Page* p = it.next();
-    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-
-    // Clear sweeping flags indicating that marking bits are still intact.
-    p->ClearWasSwept();
+    DCHECK(p->SweepingDone());

     if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
         p->IsEvacuationCandidate()) {
@@ -3860,6 +3851,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
       // testing this is fine.
+      p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
       Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
             IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
       continue;
@@ -3871,14 +3863,14 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       if (FLAG_gc_verbose) {
         PrintIsolate(isolate(), "sweeping: released page: %p", p);
       }
-      space->ReleasePage(p);
+      space->ReleasePage(p, false);
       continue;
     }
     unused_page_present = true;
   }

+    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
     sweeping_list(space).push_back(p);
-    p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
     int to_sweep = p->area_size() - p->LiveBytes();
     space->accounting_stats_.ShrinkSpace(to_sweep);
     will_be_swept++;
@@ -3940,22 +3932,7 @@ void MarkCompactCollector::SweepSpaces() {
 }


-void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
-  for (Page* p : sweeping_list(space)) {
-    if (p->parallel_sweeping_state().Value() ==
-        MemoryChunk::kSweepingFinalize) {
-      p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
-      p->SetWasSwept();
-    }
-    DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
-  }
-}
-
-
 void MarkCompactCollector::ParallelSweepSpacesComplete() {
-  ParallelSweepSpaceComplete(heap()->old_space());
-  ParallelSweepSpaceComplete(heap()->code_space());
-  ParallelSweepSpaceComplete(heap()->map_space());
   sweeping_list(heap()->old_space()).clear();
   sweeping_list(heap()->code_space()).clear();
   sweeping_list(heap()->map_space()).clear();
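Why the helper above could be deleted outright: sweeper threads now publish kSweepingDone themselves at the end of Sweep() and SweepInParallel() (see the @@ -3428 and @@ -3827 hunks), so no per-page kSweepingFinalize to kSweepingDone promotion remains for the main thread. A condensed view of what is left (a recap of the hunk above, not new code):

    // Post-sweep fix-up reduces to clearing the per-space work lists;
    // nothing per-page remains to finalize.
    void MarkCompactCollector::ParallelSweepSpacesComplete() {
      sweeping_list(heap()->old_space()).clear();
      sweeping_list(heap()->code_space()).clear();
      sweeping_list(heap()->map_space()).clear();
    }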
src/heap/mark-compact.h
@@ -749,8 +749,6 @@ class MarkCompactCollector {
   // swept in parallel.
   void ParallelSweepSpacesComplete();

-  void ParallelSweepSpaceComplete(PagedSpace* space);
-
   // Updates store buffer and slot buffer for a pointer in a migrating object.
   void RecordMigratedSlot(Object* value, Address slot,
                           SlotsBuffer** evacuation_slots_buffer);
src/heap/spaces.cc
@@ -35,7 +35,7 @@ HeapObjectIterator::HeapObjectIterator(Page* page) {
          owner == page->heap()->code_space());
   Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
              page->area_end(), kOnePageOnly);
-  DCHECK(page->WasSwept() || page->SweepingCompleted());
+  DCHECK(page->SweepingDone());
 }
@@ -66,7 +66,7 @@ bool HeapObjectIterator::AdvanceToNextPage() {
            cur_page);
   cur_addr_ = cur_page->area_start();
   cur_end_ = cur_page->area_end();
-  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+  DCHECK(cur_page->SweepingDone());
   return true;
 }
@@ -469,7 +469,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
   chunk->progress_bar_ = 0;
   chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
-  chunk->parallel_sweeping_state().SetValue(kSweepingDone);
+  chunk->concurrent_sweeping_state().SetValue(kSweepingDone);
   chunk->parallel_compaction_state().SetValue(kCompactingDone);
   chunk->mutex_ = NULL;
   chunk->available_in_small_free_list_ = 0;
@@ -480,7 +480,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
   chunk->ResetLiveBytes();
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT);
   chunk->set_next_chunk(nullptr);
   chunk->set_prev_chunk(nullptr);
@@ -923,7 +922,7 @@ bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,

 void MemoryChunk::IncrementLiveBytesFromMutator(HeapObject* object, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->SweepingDone()) {
     static_cast<PagedSpace*>(chunk->owner())->Allocate(by);
   }
   chunk->IncrementLiveBytes(by);
@@ -1225,11 +1224,11 @@ void PagedSpace::IncreaseCapacity(int size) {
 }


-void PagedSpace::ReleasePage(Page* page) {
+void PagedSpace::ReleasePage(Page* page, bool evict_free_list_items) {
   DCHECK(page->LiveBytes() == 0);
   DCHECK(AreaSize() == page->area_size());

-  if (page->WasSwept()) {
+  if (evict_free_list_items) {
     intptr_t size = free_list_.EvictFreeListItems(page);
     accounting_stats_.AllocateBytes(size);
     DCHECK_EQ(AreaSize(), static_cast<int>(size));
@@ -1275,7 +1274,7 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
     if (page == Page::FromAllocationTop(allocation_info_.top())) {
       allocation_pointer_found_in_space = true;
     }
-    CHECK(page->WasSwept());
+    CHECK(page->SweepingDone());
     HeapObjectIterator it(page);
     Address end_of_previous_object = page->area_start();
     Address top = page->area_end();
src/heap/spaces.h
@@ -308,10 +308,6 @@ class MemoryChunk {
     NEVER_EVACUATE,  // May contain immortal immutables.
     POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.

-    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
-    // otherwise marking bits are still intact.
-    WAS_SWEPT,
-
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
     // Even if the mutator writes to them they will be kept black and a white
@@ -353,16 +349,14 @@ class MemoryChunk {
   };

   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
-  // not be performed on that page.
-  // |kSweepingFinalize|: A sweeper thread is done sweeping this page and will
-  // not touch the page memory anymore.
-  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  // not be performed on that page. Sweeper threads that are done with their
+  // work will set this value and not touch the page anymore.
   // |kSweepingPending|: This page is ready for parallel sweeping.
-  enum ParallelSweepingState {
+  // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
+  enum ConcurrentSweepingState {
     kSweepingDone,
-    kSweepingFinalize,
+    kSweepingPending,
     kSweepingInProgress,
-    kSweepingPending
   };

   // Every n write barrier invocations we go to runtime even though
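Read together with the mark-compact.cc hunks above, the intended transitions through this enum over one GC cycle are roughly the following. This is a condensed walk-through of the call sites changed in this CL, not a new code path:

    // Main thread, StartSweepSpace(): queue the page for concurrent sweeping.
    p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
    sweeping_list(space).push_back(p);

    // Sweeper thread, SweepInParallel(): claim the page under page->mutex()...
    page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
    // ...sweep it, then publish completion directly. The intermediate
    // kSweepingFinalize hand-off to the main thread (and the WAS_SWEPT flag
    // it used to set) is gone.
    page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);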
@@ -556,8 +550,8 @@ class MemoryChunk {
   // Return all current flags.
   intptr_t GetFlags() { return flags_; }

-  AtomicValue<ParallelSweepingState>& parallel_sweeping_state() {
-    return parallel_sweeping_;
+  AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
+    return concurrent_sweeping_;
   }

   AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
@@ -568,19 +562,6 @@ class MemoryChunk {

   base::Mutex* mutex() { return mutex_; }

-  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
-  // progress. In particular, when we know that right before this call a
-  // sweeper thread was sweeping this page.
-  void WaitUntilSweepingCompleted() {
-    mutex_->Lock();
-    mutex_->Unlock();
-    DCHECK(SweepingCompleted());
-  }
-
-  bool SweepingCompleted() {
-    return parallel_sweeping_state().Value() <= kSweepingFinalize;
-  }
-
   // Manage live byte count (count of bytes known to be live,
   // because they are marked black).
   void ResetLiveBytes() {
@@ -759,7 +740,7 @@ class MemoryChunk {
   AtomicValue<intptr_t> high_water_mark_;

   base::Mutex* mutex_;
-  AtomicValue<ParallelSweepingState> parallel_sweeping_;
+  AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
   AtomicValue<ParallelCompactingState> parallel_compaction_;

   // PagedSpace free-list statistics.
@@ -865,9 +846,18 @@ class Page : public MemoryChunk {

   void InitializeAsAnchor(PagedSpace* owner);

-  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
-  void SetWasSwept() { SetFlag(WAS_SWEPT); }
-  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+  // WaitUntilSweepingCompleted only works when concurrent sweeping is in
+  // progress. In particular, when we know that right before this call a
+  // sweeper thread was sweeping this page.
+  void WaitUntilSweepingCompleted() {
+    mutex_->Lock();
+    mutex_->Unlock();
+    DCHECK(SweepingDone());
+  }
+
+  bool SweepingDone() {
+    return concurrent_sweeping_state().Value() == kSweepingDone;
+  }

   void ResetFreeListStatistics();
@@ -2077,7 +2067,7 @@ class PagedSpace : public Space {
   void IncreaseCapacity(int size);

   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
+  void ReleasePage(Page* page, bool evict_free_list_items);

   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
@@ -2104,13 +2094,6 @@ class PagedSpace : public Space {
   static void ResetCodeStatistics(Isolate* isolate);
 #endif

-  // Evacuation candidates are swept by evacuator. Needs to return a valid
-  // result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptBySweeperThreads(Page* p) {
-    return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
-  }
-
   // This function tries to steal size_in_bytes memory from the sweeper threads
   // free-lists. If it does not succeed stealing enough memory, it will wait
   // for the sweeper threads to finish sweeping.
src/heap/store-buffer.cc
@@ -476,7 +476,7 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
         Page* page = reinterpret_cast<Page*>(chunk);
         PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
         if (owner == heap_->map_space()) {
-          DCHECK(page->WasSwept());
+          DCHECK(page->SweepingDone());
           HeapObjectIterator iterator(page);
           for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                heap_object = iterator.Next()) {
test/cctest/test-heap.cc
@@ -5571,33 +5571,6 @@ TEST(Regress507979) {
 }


-TEST(ArrayShiftSweeping) {
-  i::FLAG_expose_gc = true;
-  CcTest::InitializeVM();
-  v8::HandleScope scope(CcTest::isolate());
-  Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
-
-  v8::Local<v8::Value> result = CompileRun(
-      "var array = new Array(400);"
-      "var tmp = new Array(1000);"
-      "array[0] = 10;"
-      "gc();"
-      "gc();"
-      "array.shift();"
-      "array;");
-
-  Handle<JSObject> o = Handle<JSObject>::cast(
-      v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result)));
-  CHECK(heap->InOldSpace(o->elements()));
-  CHECK(heap->InOldSpace(*o));
-  Page* page = Page::FromAddress(o->elements()->address());
-  CHECK(page->parallel_sweeping_state().Value() <=
-            MemoryChunk::kSweepingFinalize ||
-        Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
-}
-
-
 UNINITIALIZED_TEST(PromotionQueue) {
   i::FLAG_expose_gc = true;
   i::FLAG_max_semi_space_size = 2 * (Page::kPageSize / MB);
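One subtlety in the diff above: PagedSpace::ReleasePage() used to decide via page->WasSwept() whether the page's free-list entries had to be evicted; with the flag gone, the two call sites now pass that decision explicitly. The pairing below recaps the changed calls; the rationale comments are a reading of the surrounding code, not text from the CL:

    // ReleaseEvacuationCandidates(): the page's area was just handed to the
    // free list via space->Free(), so its entries must be evicted.
    space->ReleasePage(p, true);

    // StartSweepSpace(): an unused page (LiveBytes() == 0) that holds no
    // free-list entries, so there is nothing to evict.
    space->ReleasePage(p, false);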