[heap] Handle NEVER_ALLOCATE_ON_PAGE pages in concurrent sweepers.

This avoids accessing the page flags of all old generation PagedSpace pages when starting sweeping.

Bug: v8:9093
Change-Id: Ibdfb35f3e368107f8c364c9498312b01edce47d1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1554688
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60789}
Hannes Payer, 2019-04-11 17:40:33 +02:00, committed by Commit Bot
parent cde0d18c71
commit bb0454ac45
3 changed files with 13 additions and 19 deletions
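
In outline: previously StartSweepSpace swept NEVER_ALLOCATE_ON_PAGE pages eagerly on the main thread, which meant reading the flags of every old-generation PagedSpace page before sweeping began; after this change the concurrent sweepers treat such pages like any other page, and RefillFreeList discards their free-list entries so memory on them is never handed out. The minimal standalone C++ sketch below illustrates that scheme only; every type and name in it is an illustrative placeholder, not V8 code.

#include <cstddef>
#include <cstdio>
#include <vector>

// Toy stand-ins for V8's Page and free-list machinery.
struct Page {
  bool never_allocate = false;  // plays the role of NEVER_ALLOCATE_ON_PAGE
  std::size_t free_bytes = 0;   // bytes the sweeper put on the free list
};

// The sweeper treats every page uniformly; no per-page flag scan is needed
// when sweeping starts (the point of this change).
void SweepPage(Page* p) {
  p->free_bytes = 64;  // pretend sweeping recovered 64 free bytes
}

// When swept pages are returned to the space, free-list entries of
// never-allocate pages are dropped (mirrors the RefillFreeList hunk,
// where each FreeListCategory is Reset()).
void RefillFreeList(const std::vector<Page*>& swept) {
  for (Page* p : swept) {
    if (p->never_allocate) p->free_bytes = 0;
  }
}

int main() {
  Page normal, pinned;
  pinned.never_allocate = true;
  std::vector<Page*> swept = {&normal, &pinned};
  for (Page* p : swept) SweepPage(p);  // uniform sweeping, no flag checks
  RefillFreeList(swept);
  std::printf("normal: %zu free bytes, pinned: %zu free bytes\n",
              normal.free_bytes, pinned.free_bytes);
  return 0;
}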

src/heap/mark-compact.cc

@@ -3785,20 +3785,6 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
       continue;
     }
 
-    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
-      // We need to sweep the page to get it into an iterable state again. Note
-      // that this adds unusable memory into the free list that is later on
-      // (in the free list) dropped again. Since we only use the flag for
-      // testing this is fine.
-      p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
-      sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
-                          Heap::ShouldZapGarbage()
-                              ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
-                              : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
-      space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
-      continue;
-    }
-
     // One unused page is kept, all further are released before sweeping them.
     if (non_atomic_marking_state()->live_bytes(p) == 0) {
       if (unused_page_present) {

src/heap/spaces-inl.h

@@ -186,7 +186,9 @@ size_t PagedSpace::RelinkFreeListCategories(Page* page) {
     added += category->available();
     category->Relink();
   });
-  DCHECK_EQ(page->AvailableInFreeList(),
-            page->AvailableInFreeListFromAllocatedBytes());
+
+  DCHECK_IMPLIES(!page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+                 page->AvailableInFreeList() ==
+                     page->AvailableInFreeListFromAllocatedBytes());
   return added;
 }

src/heap/spaces.cc

@@ -1543,6 +1543,12 @@ void PagedSpace::RefillFreeList() {
   {
     Page* p = nullptr;
     while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
+      // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
+      // entries here to make them unavailable for allocations.
+      if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
+        p->ForAllFreeListCategories(
+            [](FreeListCategory* category) { category->Reset(); });
+      }
       // Only during compaction pages can actually change ownership. This is
       // safe because there exists no other competing action on the page links
       // during compaction.
@@ -1583,8 +1589,9 @@ void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
     // Relinking requires the category to be unlinked.
     other->RemovePage(p);
     AddPage(p);
-    DCHECK_EQ(p->AvailableInFreeList(),
-              p->AvailableInFreeListFromAllocatedBytes());
+    DCHECK_IMPLIES(
+        !p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE),
+        p->AvailableInFreeList() == p->AvailableInFreeListFromAllocatedBytes());
   }
   DCHECK_EQ(0u, other->Size());
   DCHECK_EQ(0u, other->Capacity());
@@ -2896,7 +2903,6 @@ FreeSpace FreeListCategory::SearchForNodeInList(size_t minimum_size,
 void FreeListCategory::Free(Address start, size_t size_in_bytes,
                             FreeMode mode) {
-  DCHECK(page()->CanAllocate());
   FreeSpace free_space = FreeSpace::cast(HeapObject::FromAddress(start));
   free_space->set_next(top());
   set_top(free_space);