diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 1f895dec38..1e0e8878d6 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -4551,7 +4551,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearStats();
 
   int will_be_swept = 0;
-  bool unused_page_present = false;
 
   // Loop needs to support deletion if live bytes == 0 for a page.
   for (auto it = space->begin(); it != space->end();) {
@@ -4561,10 +4560,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
     if (p->IsEvacuationCandidate()) {
       // Will be processed in Evacuate.
       DCHECK(!evacuation_candidates_.empty());
-      continue;
-    }
-
-    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+    } else if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
       // We need to sweep the page to get it into an iterable state again. Note
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
@@ -4575,25 +4571,19 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
                              ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
                              : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
       space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
-      continue;
-    }
-
-    // One unused page is kept, all further are released before sweeping them.
-    if (non_atomic_marking_state()->live_bytes(p) == 0) {
-      if (unused_page_present) {
-        if (FLAG_gc_verbose) {
-          PrintIsolate(isolate(), "sweeping: released page: %p",
-                       static_cast<void*>(p));
-        }
-        ArrayBufferTracker::FreeAll(p);
-        space->ReleasePage(p);
-        continue;
+    } else if (non_atomic_marking_state()->live_bytes(p) == 0) {
+      // Release empty pages
+      if (FLAG_gc_verbose) {
+        PrintIsolate(isolate(), "sweeping: released page: %p",
+                     static_cast<void*>(p));
       }
-      unused_page_present = true;
+      ArrayBufferTracker::FreeAll(p);
+      space->ReleasePage(p);
+    } else {
+      // Add non-empty pages to the sweeper.
+      sweeper().AddPage(space->identity(), p, Sweeper::REGULAR);
+      will_be_swept++;
     }
-
-    sweeper().AddPage(space->identity(), p, Sweeper::REGULAR);
-    will_be_swept++;
   }
 
   if (FLAG_gc_verbose) {
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
index 37ec5a4afc..b46b37bf87 100644
--- a/src/heap/spaces.cc
+++ b/src/heap/spaces.cc
@@ -1059,7 +1059,8 @@ template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (alloc_mode == kPooled) {
+  // Code space does not support pooled allocation.
+  if (alloc_mode == kPooled && owner->identity() != CODE_SPACE) {
     DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
@@ -1075,6 +1076,9 @@ template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
     size_t size, PagedSpace* owner, Executability executable);
 template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, PagedSpace>(
+    size_t size, PagedSpace* owner, Executability executable);
+template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
     size_t size, SemiSpace* owner, Executability executable);
 template Page*
@@ -1591,7 +1595,8 @@ bool PagedSpace::Expand() {
   if (!heap()->CanExpandOldGeneration(size)) return false;
 
   Page* page =
-      heap()->memory_allocator()->AllocatePage(size, this, executable());
+      heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+          size, this, executable());
   if (page == nullptr) return false;
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index 5974f5db20..12fc71dc32 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -1203,19 +1203,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
 
     MemoryChunk* TryGetPooledMemoryChunkSafe() {
       // Procedure:
-      // (1) Try to get a chunk that was declared as pooled and already has
-      // been uncommitted.
-      // (2) Try to steal any memory chunk of kPageSize that would've been
+      // (1) Try to steal any memory chunk of kPageSize that would've been
       // unmapped.
-      MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
-      if (chunk == nullptr) {
-        chunk = GetMemoryChunkSafe<kRegular>();
-        if (chunk != nullptr) {
-          // For stolen chunks we need to manually free any allocated memory.
-          chunk->ReleaseAllocatedMemory();
-        }
+      // (2) Try to get a chunk that was declared as pooled and already has
+      // been uncommitted.
+      MemoryChunk* chunk = GetMemoryChunkSafe<kRegular>();
+      if (chunk != nullptr) {
+        // For stolen chunks we need to manually free any allocated memory.
+        chunk->ReleaseAllocatedMemory();
+        return chunk;
       }
-      return chunk;
+      return GetMemoryChunkSafe<kPooled>();
     }
 
     void FreeQueuedChunks();