[heap] Removed keep-one-unused-page concept in sweeper.
This works because we pool regular non-executable pages on a lower level.
Executable pages are currently not supported by the pooling mechanism. If
this regresses we should fix it.

Change-Id: Ief3484d59f1f1f4bc63f8e718482e4174bedc012
Reviewed-on: https://chromium-review.googlesource.com/778939
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#49536}
parent cc0086532f
commit 9cbb2ed4c3
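The intent of the change, summarized as a small self-contained C++ sketch (hypothetical names such as AllocateFresh and the bare pool vector; not the actual V8 classes): page expansion of a paged space now always asks for a pooled page, and the allocator quietly falls back to a fresh allocation for code space, whose executable pages are never pooled.

// Minimal, self-contained sketch of the allocation policy after this change
// (hypothetical types; not the actual V8 classes). Every paged-space
// expansion requests a pooled page; the allocator falls back to a fresh
// mapping for code space, which the pool does not serve.
#include <cstdio>
#include <vector>

enum Space { OLD_SPACE, CODE_SPACE };
enum AllocationMode { kRegular, kPooled };

struct Chunk {
  bool from_pool;
};

static std::vector<Chunk*> pool;  // previously unmapped, reusable pages

static Chunk* AllocateFresh() { return new Chunk{false}; }

static Chunk* AllocatePage(AllocationMode mode, Space owner) {
  // Code space does not support pooled allocation: executable pages never
  // enter the pool, so only non-code spaces may take a chunk from it.
  if (mode == kPooled && owner != CODE_SPACE && !pool.empty()) {
    Chunk* chunk = pool.back();
    pool.pop_back();
    return chunk;
  }
  return AllocateFresh();
}

int main() {
  pool.push_back(new Chunk{true});
  Chunk* old_page = AllocatePage(kPooled, OLD_SPACE);    // served from the pool
  Chunk* code_page = AllocatePage(kPooled, CODE_SPACE);  // always a fresh mapping
  std::printf("old from pool: %d, code from pool: %d\n", old_page->from_pool,
              code_page->from_pool);
  delete old_page;
  delete code_page;
  return 0;
}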
@@ -4551,7 +4551,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   space->ClearStats();
 
   int will_be_swept = 0;
-  bool unused_page_present = false;
 
   // Loop needs to support deletion if live bytes == 0 for a page.
   for (auto it = space->begin(); it != space->end();) {
@@ -4561,10 +4560,7 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
     if (p->IsEvacuationCandidate()) {
       // Will be processed in Evacuate.
       DCHECK(!evacuation_candidates_.empty());
-      continue;
-    }
-
-    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+    } else if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
       // We need to sweep the page to get it into an iterable state again. Note
       // that this adds unusable memory into the free list that is later on
       // (in the free list) dropped again. Since we only use the flag for
@@ -4575,25 +4571,19 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
                              ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
                              : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
       space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
-      continue;
-    }
-
-    // One unused page is kept, all further are released before sweeping them.
-    if (non_atomic_marking_state()->live_bytes(p) == 0) {
-      if (unused_page_present) {
-        if (FLAG_gc_verbose) {
-          PrintIsolate(isolate(), "sweeping: released page: %p",
-                       static_cast<void*>(p));
-        }
-        ArrayBufferTracker::FreeAll(p);
-        space->ReleasePage(p);
-        continue;
-      }
-      unused_page_present = true;
-    }
-
-    sweeper().AddPage(space->identity(), p, Sweeper::REGULAR);
-    will_be_swept++;
+    } else if (non_atomic_marking_state()->live_bytes(p) == 0) {
+      // Release empty pages
+      if (FLAG_gc_verbose) {
+        PrintIsolate(isolate(), "sweeping: released page: %p",
+                     static_cast<void*>(p));
+      }
+      ArrayBufferTracker::FreeAll(p);
+      space->ReleasePage(p);
+    } else {
+      // Add non-empty pages to the sweeper.
+      sweeper().AddPage(space->identity(), p, Sweeper::REGULAR);
+      will_be_swept++;
+    }
   }
 
   if (FLAG_gc_verbose) {
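The sweeper loop above now classifies each page with a single if/else-if chain instead of tracking one kept-back unused page. A rough sketch of the resulting decision order (hypothetical helper and field names, not the actual V8 code):

// Simplified per-page decision after the change (hypothetical types):
// evacuation candidates are skipped, never-allocate pages are swept eagerly
// to stay iterable, empty pages are released immediately (no page is kept
// back anymore), and everything else goes to the concurrent sweeper.
enum class PageAction { kSkip, kSweepEagerly, kRelease, kAddToSweeper };

struct PageInfo {
  bool is_evacuation_candidate;
  bool never_allocate_on_page;
  long live_bytes;
};

PageAction ClassifyForSweeping(const PageInfo& p) {
  if (p.is_evacuation_candidate) return PageAction::kSkip;  // handled in Evacuate
  if (p.never_allocate_on_page) return PageAction::kSweepEagerly;
  if (p.live_bytes == 0) return PageAction::kRelease;  // previously one page was kept
  return PageAction::kAddToSweeper;
}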
@@ -1059,7 +1059,8 @@ template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
 Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                     Executability executable) {
   MemoryChunk* chunk = nullptr;
-  if (alloc_mode == kPooled) {
+  // Code space does not support pooled allocation.
+  if (alloc_mode == kPooled && owner->identity() != CODE_SPACE) {
     DCHECK_EQ(size, static_cast<size_t>(MemoryChunk::kAllocatableMemory));
     DCHECK_EQ(executable, NOT_EXECUTABLE);
     chunk = AllocatePagePooled(owner);
@@ -1075,6 +1076,9 @@ template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
     size_t size, PagedSpace* owner, Executability executable);
 template Page*
+MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, PagedSpace>(
+    size_t size, PagedSpace* owner, Executability executable);
+template Page*
 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
     size_t size, SemiSpace* owner, Executability executable);
 template Page*
@@ -1591,7 +1595,8 @@ bool PagedSpace::Expand() {
   if (!heap()->CanExpandOldGeneration(size)) return false;
 
   Page* page =
-      heap()->memory_allocator()->AllocatePage(size, this, executable());
+      heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
+          size, this, executable());
   if (page == nullptr) return false;
   // Pages created during bootstrapping may contain immortal immovable objects.
   if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
@@ -1203,19 +1203,17 @@ class V8_EXPORT_PRIVATE MemoryAllocator {
 
   MemoryChunk* TryGetPooledMemoryChunkSafe() {
     // Procedure:
-    // (1) Try to get a chunk that was declared as pooled and already has
-    // been uncommitted.
-    // (2) Try to steal any memory chunk of kPageSize that would've been
+    // (1) Try to steal any memory chunk of kPageSize that would've been
     // unmapped.
-    MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
-    if (chunk == nullptr) {
-      chunk = GetMemoryChunkSafe<kRegular>();
-      if (chunk != nullptr) {
-        // For stolen chunks we need to manually free any allocated memory.
-        chunk->ReleaseAllocatedMemory();
-      }
+    // (2) Try to get a chunk that was declared as pooled and already has
+    // been uncommitted.
+    MemoryChunk* chunk = GetMemoryChunkSafe<kRegular>();
+    if (chunk != nullptr) {
+      // For stolen chunks we need to manually free any allocated memory.
+      chunk->ReleaseAllocatedMemory();
+      return chunk;
     }
-    return chunk;
+    return GetMemoryChunkSafe<kPooled>();
   }
 
   void FreeQueuedChunks();
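The reordering above makes the allocator prefer stealing a regular chunk that would otherwise be unmapped before consuming an already-uncommitted pooled chunk. A hedged sketch of the new lookup order (hypothetical queue names, and ignoring the locking the real GetMemoryChunkSafe variants provide):

// Sketch of the reordered lookup (hypothetical containers, not the real
// MemoryAllocator, and not thread-safe): first steal a regular chunk that
// was queued for unmapping (it still owns its memory, which must be released
// manually), then fall back to an already-uncommitted pooled chunk.
#include <deque>

struct MemoryChunk {
  void ReleaseAllocatedMemory() { /* drop per-chunk allocations */ }
};

std::deque<MemoryChunk*> regular_queue;  // chunks scheduled for unmapping
std::deque<MemoryChunk*> pooled_queue;   // already-uncommitted pooled chunks

MemoryChunk* TryGetPooledMemoryChunkSketch() {
  if (!regular_queue.empty()) {
    MemoryChunk* chunk = regular_queue.front();
    regular_queue.pop_front();
    // For stolen chunks we need to manually free any allocated memory.
    chunk->ReleaseAllocatedMemory();
    return chunk;
  }
  if (pooled_queue.empty()) return nullptr;
  MemoryChunk* chunk = pooled_queue.front();
  pooled_queue.pop_front();
  return chunk;
}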