[heap] Update evacuation and implement sweeping in MinorMC
Bug: v8:12612
Change-Id: I28a574435646073d65e6fe1e746267ffb0eaa01d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3864083
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82932}
This commit is contained in: parent 2f41221fbc, commit 026a100031
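As a rough orientation before the diff: the CL makes the minor collector hand empty new-space pages to a sweeper, evacuate/promote the remaining live pages, and complete new-space sweeping atomically in Finish() before refilling the free list. The toy model below (not V8 code; all class and method names are simplified stand-ins that merely mirror identifiers appearing in the diff) sketches that control flow under those assumptions.

```cpp
#include <iostream>
#include <vector>

// Toy stand-ins for V8's Page/Sweeper/MinorMC; bodies are simplified
// placeholders for illustration only.
struct Page {
  int live_bytes = 0;
  bool swept = false;
};

class Sweeper {
 public:
  void AddPage(Page* p) { queue_.push_back(p); }  // cf. StartSweepNewSpace()
  void StartSweeping() { /* concurrent sweeping would start here */ }
  void EnsureCompleted() {                         // cf. MinorMC Finish()
    for (Page* p : queue_) p->swept = true;        // "free" dead objects
    queue_.clear();
  }
  bool sweeping_in_progress() const { return !queue_.empty(); }

 private:
  std::vector<Page*> queue_;
};

class MinorMC {
 public:
  explicit MinorMC(std::vector<Page>* new_space) : new_space_(new_space) {}

  void CollectGarbage() {  // cf. MinorMarkCompactCollector::CollectGarbage()
    Sweep();
    Evacuate();
    Finish();
  }

 private:
  void Sweep() {
    // Pages with no live objects go to the sweeper; non-empty pages are left
    // for evacuation/promotion.
    for (Page& p : *new_space_)
      if (p.live_bytes == 0) sweeper_.AddPage(&p);
    sweeper_.StartSweeping();
  }
  void Evacuate() { /* promote pages that still hold live objects */ }
  void Finish() {
    // Keep new-space sweeping atomic: complete it before the GC ends, so the
    // free list can be refilled from the swept pages.
    sweeper_.EnsureCompleted();
  }

  std::vector<Page>* new_space_;
  Sweeper sweeper_;
};

int main() {
  std::vector<Page> new_space(4);
  new_space[1].live_bytes = 128;  // one page with survivors
  MinorMC(&new_space).CollectGarbage();
  std::cout << "empty page swept: " << new_space[0].swept << "\n";
}
```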
@@ -982,18 +982,20 @@ enum AllocationSpace {
   OLD_SPACE,      // Old generation regular object space.
   CODE_SPACE,     // Old generation code object space, marked executable.
   MAP_SPACE,      // Old generation map object space, non-movable.
+  NEW_SPACE,      // Young generation space for regular objects collected
+                  // with Scavenger/MinorMC.
   LO_SPACE,       // Old generation large object space.
   CODE_LO_SPACE,  // Old generation large code object space.
   NEW_LO_SPACE,   // Young generation large object space.
-  NEW_SPACE,  // Young generation semispaces for regular objects collected with
-              // Scavenger.

   FIRST_SPACE = RO_SPACE,
-  LAST_SPACE = NEW_SPACE,
+  LAST_SPACE = NEW_LO_SPACE,
   FIRST_MUTABLE_SPACE = OLD_SPACE,
-  LAST_MUTABLE_SPACE = NEW_SPACE,
+  LAST_MUTABLE_SPACE = NEW_LO_SPACE,
   FIRST_GROWABLE_PAGED_SPACE = OLD_SPACE,
-  LAST_GROWABLE_PAGED_SPACE = MAP_SPACE
+  LAST_GROWABLE_PAGED_SPACE = MAP_SPACE,
+  FIRST_SWEEPABLE_SPACE = OLD_SPACE,
+  LAST_SWEEPABLE_SPACE = NEW_SPACE
 };

 constexpr int kSpaceTagSize = 4;
 static_assert(FIRST_SPACE == 0);
@@ -827,7 +827,6 @@ void GCTracer::PrintNVP() const {
       "evacuate=%.2f "
       "evacuate.copy=%.2f "
       "evacuate.update_pointers=%.2f "
-      "evacuate.update_pointers.to_new_roots=%.2f "
       "evacuate.update_pointers.slots=%.2f "
       "background.mark=%.2f "
       "background.evacuate.copy=%.2f "
@@ -850,7 +849,6 @@ void GCTracer::PrintNVP() const {
       current_scope(Scope::MINOR_MC_EVACUATE),
       current_scope(Scope::MINOR_MC_EVACUATE_COPY),
       current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS),
-      current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS),
       current_scope(Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS),
       current_scope(Scope::MINOR_MC_BACKGROUND_MARKING),
       current_scope(Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY),
@@ -658,6 +658,10 @@ uintptr_t Heap::code_page_collection_memory_modification_scope_depth() {
   return local_heap->code_page_collection_memory_modification_scope_depth_;
 }

+PagedNewSpace* Heap::paged_new_space() const {
+  return PagedNewSpace::From(new_space());
+}
+
 CodeSpaceMemoryModificationScope::~CodeSpaceMemoryModificationScope() {
   if (heap_->write_protect_code_memory()) {
     heap_->decrement_code_space_memory_modification_scope_depth();
@@ -51,9 +51,7 @@ void HeapLayoutTracer::PrintBasicMemoryChunk(std::ostream& os,
 // static
 void HeapLayoutTracer::PrintHeapLayout(std::ostream& os, Heap* heap) {
   if (FLAG_minor_mc) {
-    const PagedNewSpace* paged_new_space =
-        PagedNewSpace::From(heap->new_space());
-    for (const Page* page : *paged_new_space) {
+    for (const Page* page : *heap->paged_new_space()) {
       PrintBasicMemoryChunk(os, *page, "new_space");
     }
   } else {
@@ -4452,7 +4452,7 @@ void Heap::VerifyCountersBeforeConcurrentSweeping() {
     // We need to refine the counters on pages that are already swept and have
     // not been moved over to the actual space. Otherwise, the AccountingStats
     // are just an over approximation.
-    space->RefillFreeList();
+    space->RefillFreeList(mark_compact_collector()->sweeper());
     space->VerifyCountersBeforeConcurrentSweeping();
   }
 }
@@ -4464,9 +4464,7 @@ void Heap::VerifyCommittedPhysicalMemory() {
     space->VerifyCommittedPhysicalMemory();
   }
   if (FLAG_minor_mc && new_space()) {
-    PagedNewSpace::From(new_space())
-        ->paged_space()
-        ->VerifyCommittedPhysicalMemory();
+    paged_new_space()->paged_space()->VerifyCommittedPhysicalMemory();
   }
 }
 #endif  // DEBUG
@@ -119,6 +119,7 @@ class ObjectIterator;
 class ObjectStats;
 class Page;
 class PagedSpace;
+class PagedNewSpace;
 class ReadOnlyHeap;
 class RootVisitor;
 class RwxMemoryWriteScope;
@@ -873,6 +874,7 @@ class Heap {
   inline Address NewSpaceTop();

   NewSpace* new_space() const { return new_space_; }
+  inline PagedNewSpace* paged_new_space() const;
   OldSpace* old_space() const { return old_space_; }
   OldSpace* shared_old_space() const { return shared_old_space_; }
   CodeSpace* code_space() const { return code_space_; }
@@ -723,15 +723,15 @@ void MarkCompactCollector::EnsureSweepingCompleted(
         ThreadKind::kMain);

     sweeper()->EnsureCompleted();
-    heap()->old_space()->RefillFreeList();
+    heap()->old_space()->RefillFreeList(sweeper());
     {
       CodePageHeaderModificationScope rwx_write_scope(
           "Updating per-page stats stored in page headers requires write "
           "access to Code page headers");
-      heap()->code_space()->RefillFreeList();
+      heap()->code_space()->RefillFreeList(sweeper());
     }
     if (heap()->map_space()) {
-      heap()->map_space()->RefillFreeList();
+      heap()->map_space()->RefillFreeList(sweeper());
       heap()->map_space()->SortFreeList();
     }

@@ -1078,10 +1078,8 @@ void MarkCompactCollector::VerifyMarking() {
     heap()->old_space()->VerifyLiveBytes();
     if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
     heap()->code_space()->VerifyLiveBytes();
-    if (FLAG_minor_mc && heap()->new_space())
-      PagedNewSpace::From(heap()->new_space())
-          ->paged_space()
-          ->VerifyLiveBytes();
+    if (FLAG_minor_mc && heap()->paged_new_space())
+      heap()->paged_new_space()->paged_space()->VerifyLiveBytes();
   }
 #endif
 }
@@ -1132,6 +1130,16 @@ void MarkCompactCollector::Finish() {
     SweepLargeSpace(heap()->new_lo_space());
   }

+  if (FLAG_minor_mc && heap()->new_space()) {
+    // Keep new space sweeping atomic.
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_FINISH_SWEEP_NEW,
+                                ThreadKind::kMain);
+    sweeper()->ParallelSweepSpace(NEW_SPACE,
+                                  Sweeper::SweepingMode::kEagerDuringGC, 0);
+    heap()->paged_new_space()->paged_space()->RefillFreeList(sweeper());
+  }
+
   sweeper()->StartSweeperTasks();

   // Ensure unmapper tasks are stopped such that queued pages aren't freed
@@ -4183,6 +4191,7 @@ void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
           marking_state->live_bytes(chunk));
       break;
     case kPageNewToNew:
+      DCHECK(!FLAG_minor_mc);
      LiveObjectVisitor::VisitBlackObjectsNoFail(
           chunk, marking_state, &new_to_new_page_visitor_,
           LiveObjectVisitor::kKeepMarking);
@@ -4340,11 +4349,14 @@ void MarkCompactCollector::EvacuatePagesInParallel() {

   // Evacuation of new space pages cannot be aborted, so it needs to run
   // before old space evacuation.
+  bool force_page_promotion =
+      !heap()->IsGCWithoutStack() && !FLAG_compact_with_stack;
   for (Page* page : new_space_evacuation_pages_) {
     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
-    if (live_bytes_on_page == 0) continue;
+    DCHECK_LT(0, live_bytes_on_page);
     live_bytes += live_bytes_on_page;
-    if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
+    if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes) ||
+        force_page_promotion) {
       EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
       DCHECK_EQ(heap()->old_space(), page->owner());
       // The move added page->allocated_bytes to the old space, but we are
@@ -4534,6 +4546,10 @@ void MarkCompactCollector::Evacuate() {
         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
         DCHECK_EQ(OLD_SPACE, p->owner_identity());
         sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
+      } else if (FLAG_minor_mc) {
+        // Sweep non-promoted pages to add them back to the free list.
+        DCHECK_EQ(NEW_SPACE, p->owner_identity());
+        sweeper()->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
       }
     }
     new_space_evacuation_pages_.clear();
@@ -4759,6 +4775,7 @@ class RememberedSetUpdatingItem : public UpdatingItem {
       return REMOVE_SLOT;
     }
     if (Heap::InFromPage(heap_object)) {
+      DCHECK(!FLAG_minor_mc);
       MapWord map_word = heap_object.map_word(kRelaxedLoad);
       if (map_word.IsForwardingAddress()) {
         HeapObjectReference::Update(THeapObjectSlot(slot),
@@ -4780,6 +4797,19 @@ class RememberedSetUpdatingItem : public UpdatingItem {
       // if the slot was already updated during old->old updating.
       // In case the page has been moved, check markbits to determine liveness
      // of the slot. In the other case, the slot can just be kept.
+      if (FLAG_minor_mc) {
+        MapWord map_word = heap_object.map_word(kRelaxedLoad);
+        if (map_word.IsForwardingAddress()) {
+          HeapObjectReference::Update(THeapObjectSlot(slot),
+                                      map_word.ToForwardingAddress());
+          bool success = (*slot).GetHeapObject(&heap_object);
+          USE(success);
+          DCHECK(success);
+        } else if (marking_state_->IsBlack(heap_object)) {
+          return KEEP_SLOT;
+        }
+        return REMOVE_SLOT;
+      }
       if (Page::FromHeapObject(heap_object)
               ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
         if (marking_state_->IsBlack(heap_object)) {
@@ -5361,6 +5391,34 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
   }
 }

+void MarkCompactCollector::StartSweepNewSpace() {
+  PagedSpaceBase* paged_space = heap()->paged_new_space()->paged_space();
+  paged_space->ClearAllocatorState();
+
+  int will_be_swept = 0;
+
+  for (auto it = paged_space->begin(); it != paged_space->end();) {
+    Page* p = *(it++);
+    DCHECK(p->SweepingDone());
+
+    if (non_atomic_marking_state()->live_bytes(p) > 0) {
+      // Non-empty pages will be evacuated/promoted.
+      continue;
+    }
+
+    // New space preallocates all its pages. Don't free empty pages since they
+    // will just be reallocated.
+    DCHECK_EQ(NEW_SPACE, paged_space->identity());
+    sweeper_->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
+    will_be_swept++;
+  }
+
+  if (FLAG_gc_verbose) {
+    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+                 paged_space->name(), will_be_swept);
+  }
+}
+
 void MarkCompactCollector::Sweep() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
 #ifdef DEBUG
@@ -5394,6 +5452,11 @@ void MarkCompactCollector::Sweep() {
           heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP, ThreadKind::kMain);
       StartSweepSpace(heap()->map_space());
     }
+    if (FLAG_minor_mc && heap()->new_space()) {
+      GCTracer::Scope sweep_scope(
+          heap()->tracer(), GCTracer::Scope::MC_SWEEP_NEW, ThreadKind::kMain);
+      StartSweepNewSpace();
+    }
     sweeper()->StartSweeping();
   }
 }
@@ -5481,6 +5544,7 @@ class YoungGenerationEvacuationVerifier : public EvacuationVerifier {

   void Run() override {
     DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
+    DCHECK(!heap_->minor_mark_compact_collector()->sweeping_in_progress());
     VerifyRoots();
     VerifyEvacuation(heap_->new_space());
     VerifyEvacuation(heap_->old_space());
@@ -5565,7 +5629,8 @@ constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;

 MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
     : CollectorBase(heap, GarbageCollector::MINOR_MARK_COMPACTOR),
-      page_parallel_job_semaphore_(0) {}
+      page_parallel_job_semaphore_(0),
+      sweeper_(std::make_unique<Sweeper>(heap_, non_atomic_marking_state())) {}

 std::pair<size_t, size_t> MinorMarkCompactCollector::ProcessMarkingWorklist(
     size_t bytes_to_process) {
@@ -5681,11 +5746,9 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
   TRACE_GC(heap()->tracer(),
            GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);

   PointersUpdatingVisitor updating_visitor(heap());
   std::vector<std::unique_ptr<UpdatingItem>> updating_items;

-  // Create batches of global handles.
-  CollectToSpaceUpdatingItems(&updating_items);
   CollectRememberedSetUpdatingItems(this, &updating_items, heap()->old_space(),
                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
   CollectRememberedSetUpdatingItems(this, &updating_items, heap()->code_space(),
@@ -5701,13 +5764,6 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
                                     heap()->code_lo_space(),
                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);

-  {
-    TRACE_GC(heap()->tracer(),
-             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
-    heap()->IterateRoots(&updating_visitor,
-                         base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
-                                                 SkipRoot::kOldGeneration});
-  }
   {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
@@ -5725,9 +5781,6 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
     TRACE_GC(heap()->tracer(),
              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);

-    EvacuationWeakObjectRetainer evacuation_object_retainer;
-    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
-
     // Update pointers from external string table.
     heap()->UpdateYoungReferencesInExternalStringTable(
         &UpdateReferenceInExternalStringTableEntry);
@@ -5765,6 +5818,8 @@ void MinorMarkCompactCollector::Prepare() {
   if (!heap()->incremental_marking()->IsMarking()) {
     StartMarking();
   }
+
+  heap()->new_space()->FreeLinearAllocationArea();
 }

 void MinorMarkCompactCollector::StartMarking() {
@@ -5782,12 +5837,23 @@ void MinorMarkCompactCollector::StartMarking() {

 void MinorMarkCompactCollector::Finish() {
   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_FINISH);

+  {
+    // Keep new space sweeping atomic.
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_FINISH_SWEEP_NEW,
+                                ThreadKind::kMain);
+    sweeper_->EnsureCompleted(Sweeper::SweepingMode::kEagerDuringGC);
+    heap()->paged_new_space()->paged_space()->RefillFreeList(sweeper());
+  }
+
   local_marking_worklists_.reset();
   main_marking_visitor_.reset();
 }

 void MinorMarkCompactCollector::CollectGarbage() {
   DCHECK(!heap()->mark_compact_collector()->in_use());
   DCHECK_NOT_NULL(heap()->new_space());
   // Minor MC does not support processing the ephemeron remembered set.
   DCHECK(heap()->ephemeron_remembered_set_.empty());
@@ -5802,6 +5868,7 @@ void MinorMarkCompactCollector::CollectGarbage() {
   }
 #endif  // VERIFY_HEAP

+  Sweep();
   Evacuate();
   Finish();

@@ -5819,24 +5886,16 @@ void MinorMarkCompactCollector::CollectGarbage() {

   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
-    for (Page* p : PageRange(SemiSpaceNewSpace::From(heap()->new_space())
-                                 ->from_space()
-                                 .first_page(),
-                             nullptr)) {
-      DCHECK_EQ(promoted_pages_.end(),
-                std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
-      non_atomic_marking_state()->ClearLiveness(p);
-      if (FLAG_concurrent_marking) {
-        // Ensure that concurrent marker does not track pages that are
-        // going to be unmapped.
-        heap()->concurrent_marking()->ClearMemoryChunkData(p);
-      }
-    }
-    // Since we promote all surviving large objects immediately, all
-    // remaining large objects must be dead.
-    // TODO(v8:11685): Don't free all as soon as we have an intermediate
-    // generation.
-    heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
+    // Since we promote all surviving large objects immediately, all remaining
+    // large objects must be dead.
+    NonAtomicMarkingState* marking_state = non_atomic_marking_state();
+    heap()->new_lo_space()->FreeDeadObjects([marking_state](HeapObject obj) {
+      // New large object space is not swept and markbits for non-promoted
+      // objects are still in tact.
+      USE(marking_state);
+      DCHECK(marking_state->IsWhite(obj));
+      return true;
+    });
   }

   CleanupPromotedPages();
@@ -5952,8 +6011,6 @@ void MinorMarkCompactCollector::EvacuatePrologue() {
     }
   }

-  new_space->EvacuatePrologue();
-
   heap()->new_lo_space()->Flip();
   heap()->new_lo_space()->ResetPendingObject();
 }
@@ -5962,30 +6019,6 @@ void MinorMarkCompactCollector::EvacuateEpilogue() {
   heap()->new_space()->EvacuateEpilogue();
 }

-int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
-    std::vector<std::unique_ptr<UpdatingItem>>* items) {
-  // Seed to space pages.
-  const Address space_start = heap()->new_space()->first_allocatable_address();
-  const Address space_end = heap()->new_space()->top();
-  int pages = 0;
-  for (Page* page : PageRange(space_start, space_end)) {
-    Address start =
-        page->Contains(space_start) ? space_start : page->area_start();
-    Address end = page->Contains(space_end) ? space_end : page->area_end();
-    items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
-    pages++;
-  }
-  return pages;
-}
-
-std::unique_ptr<UpdatingItem>
-MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
-                                                     Address start,
-                                                     Address end) {
-  return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
-      heap(), chunk, start, end, non_atomic_marking_state());
-}
-
 std::unique_ptr<UpdatingItem>
 MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
@@ -6365,9 +6398,13 @@ void MinorMarkCompactCollector::Evacuate() {
   {
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
     for (Page* p : new_space_evacuation_pages_) {
-      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
-          p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
+      DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
+      if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
         promoted_pages_.push_back(p);
+      } else {
+        // Page was not promoted. Sweep it instead.
+        DCHECK_EQ(NEW_SPACE, p->owner_identity());
+        sweeper()->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
       }
     }
     new_space_evacuation_pages_.clear();
@@ -6413,52 +6450,24 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                "YoungGenerationEvacuator::RawEvacuatePage");
   NonAtomicMarkingState* marking_state = collector_->non_atomic_marking_state();
   *live_bytes = marking_state->live_bytes(chunk);
-  switch (ComputeEvacuationMode(chunk)) {
-    case kObjectsNewToOld:
-      LiveObjectVisitor::VisitBlackObjectsNoFail(
-          chunk, marking_state, &new_space_visitor_,
-          LiveObjectVisitor::kClearMarkbits);
-      break;
-    case kPageNewToOld:
-      LiveObjectVisitor::VisitBlackObjectsNoFail(
-          chunk, marking_state, &new_to_old_page_visitor_,
-          LiveObjectVisitor::kKeepMarking);
-      new_to_old_page_visitor_.account_moved_bytes(
-          marking_state->live_bytes(chunk));
-      if (!chunk->IsLargePage()) {
-        if (heap()->ShouldZapGarbage()) {
-          collector_->MakeIterable(static_cast<Page*>(chunk),
-                                   FreeSpaceTreatmentMode::kZapFreeSpace);
-        } else if (heap()->incremental_marking()->IsMarking()) {
-          // When incremental marking is on, we need to clear the mark bits
-          // of the full collector. We cannot yet discard the young
-          // generation mark bits as they are still relevant for pointers
-          // updating.
-          collector_->MakeIterable(static_cast<Page*>(chunk),
-                                   FreeSpaceTreatmentMode::kIgnoreFreeSpace);
-        }
-      }
-      break;
-    case kPageNewToNew:
-      LiveObjectVisitor::VisitBlackObjectsNoFail(
-          chunk, marking_state, &new_to_new_page_visitor_,
-          LiveObjectVisitor::kKeepMarking);
-      new_to_new_page_visitor_.account_moved_bytes(
-          marking_state->live_bytes(chunk));
-      DCHECK(!chunk->IsLargePage());
-      if (heap()->ShouldZapGarbage()) {
-        collector_->MakeIterable(static_cast<Page*>(chunk),
-                                 FreeSpaceTreatmentMode::kZapFreeSpace);
-      } else if (heap()->incremental_marking()->IsMarking()) {
-        // When incremental marking is on, we need to clear the mark bits of
-        // the full collector. We cannot yet discard the young generation
-        // mark bits as they are still relevant for pointers updating.
-        collector_->MakeIterable(static_cast<Page*>(chunk),
-                                 FreeSpaceTreatmentMode::kIgnoreFreeSpace);
-      }
-      break;
-    case kObjectsOldToOld:
-      UNREACHABLE();
+  DCHECK_EQ(kPageNewToOld, ComputeEvacuationMode(chunk));
+  LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,
+                                             &new_to_old_page_visitor_,
+                                             LiveObjectVisitor::kKeepMarking);
+  new_to_old_page_visitor_.account_moved_bytes(
+      marking_state->live_bytes(chunk));
+  if (!chunk->IsLargePage()) {
+    if (heap()->ShouldZapGarbage()) {
+      collector_->MakeIterable(static_cast<Page*>(chunk),
+                               FreeSpaceTreatmentMode::kZapFreeSpace);
+    } else if (heap()->incremental_marking()->IsMarking()) {
+      // When incremental marking is on, we need to clear the mark bits
+      // of the full collector. We cannot yet discard the young
+      // generation mark bits as they are still relevant for pointers
+      // updating.
+      collector_->MakeIterable(static_cast<Page*>(chunk),
+                               FreeSpaceTreatmentMode::kIgnoreFreeSpace);
+    }
+  }
 }

@@ -6473,13 +6482,9 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
     DCHECK_LT(0, live_bytes_on_page);
     live_bytes += live_bytes_on_page;
     if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
-      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
-        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
-      } else {
-        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
-      }
+      EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
+      evacuation_items.emplace_back(ParallelWorkItem{}, page);
     }
-    evacuation_items.emplace_back(ParallelWorkItem{}, page);
   }

   // Promote young generation large objects.
@@ -6510,5 +6515,45 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
   }
 }

+void MinorMarkCompactCollector::StartSweepNewSpace() {
+  PagedSpaceBase* paged_space = heap()->paged_new_space()->paged_space();
+  paged_space->ClearAllocatorState();
+
+  int will_be_swept = 0;
+
+  // Loop needs to support deletion if live bytes == 0 for a page.
+  for (auto it = paged_space->begin(); it != paged_space->end();) {
+    Page* p = *(it++);
+    DCHECK(p->SweepingDone());
+
+    if (non_atomic_marking_state()->live_bytes(p) > 0) {
+      // Non-empty pages will be evacuated/promoted.
+      continue;
+    }
+
+    // New space preallocates all its pages. Don't free empty pages since they
+    // will just be reallocated.
+    DCHECK_EQ(NEW_SPACE, paged_space->identity());
+    sweeper_->AddPage(NEW_SPACE, p, Sweeper::REGULAR);
+    will_be_swept++;
+  }
+
+  if (FLAG_gc_verbose) {
+    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
+                 paged_space->name(), will_be_swept);
+  }
+}
+
+void MinorMarkCompactCollector::Sweep() {
+  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEP);
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MINOR_MC_SWEEP_NEW,
+                                ThreadKind::kMain);
+    StartSweepNewSpace();
+  }
+  sweeper_->StartSweeping();
+}
+
 }  // namespace internal
 }  // namespace v8
@@ -29,6 +29,7 @@ class ItemParallelJob;
 class LargeObjectSpace;
 class LargePage;
 class MigrationObserver;
+class PagedNewSpace;
 class ReadOnlySpace;
 class RecordMigratedSlotVisitor;
 class UpdatingItem;
@@ -683,6 +684,7 @@ class MarkCompactCollector final : public CollectorBase {
   // up other pages for sweeping. Does not start sweeper tasks.
   void Sweep();
   void StartSweepSpace(PagedSpace* space);
+  void StartSweepNewSpace();
   void SweepLargeSpace(LargeObjectSpace* space);

   void EvacuatePrologue();
@@ -811,10 +813,8 @@ class MinorMarkCompactCollector final : public CollectorBase {

   void Finish() final;

-  bool sweeping_in_progress() const final {
-    // TODO(v8:13012): Fix this once sweeping is implemented.
-    return false;
-  }
+  Sweeper* sweeper() { return sweeper_.get(); }
+  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

   void VisitObject(HeapObject obj) final;

@@ -831,19 +831,15 @@ class MinorMarkCompactCollector final : public CollectorBase {
   void TraceFragmentation();
   void ClearNonLiveReferences();

+  void Sweep();
+  void StartSweepNewSpace();
+
   void EvacuatePrologue();
   void EvacuateEpilogue();
   void Evacuate();
   void EvacuatePagesInParallel();
   void UpdatePointersAfterEvacuation();

-  std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
-                                                          Address start,
-                                                          Address end);
-
-  int CollectToSpaceUpdatingItems(
-      std::vector<std::unique_ptr<UpdatingItem>>* items);
-
   void SweepArrayBufferExtensions();

   std::unique_ptr<YoungGenerationMainMarkingVisitor> main_marking_visitor_;
@@ -853,6 +849,8 @@ class MinorMarkCompactCollector final : public CollectorBase {
   std::vector<Page*> promoted_pages_;
   std::vector<LargePage*> promoted_large_pages_;

+  std::unique_ptr<Sweeper> sweeper_;
+
   friend class YoungGenerationMarkingTask;
   friend class YoungGenerationMarkingJob;
   friend class YoungGenerationMainMarkingVisitor;
@@ -966,8 +966,6 @@ void PagedSpaceForNewSpace::Shrink() {
   target_capacity_ = current_capacity_;
 }

-void PagedSpaceForNewSpace::EvacuatePrologue() { FreeLinearAllocationArea(); }
-
 void PagedSpaceForNewSpace::UpdateInlineAllocationLimit(size_t size_in_bytes) {
   PagedSpaceBase::UpdateInlineAllocationLimit(size_in_bytes);
 }
@@ -584,7 +584,7 @@ class V8_EXPORT_PRIVATE PagedSpaceForNewSpace final : public PagedSpaceBase {
   }

   // Reset the allocation pointer.
-  void EvacuatePrologue();
+  void EvacuatePrologue() {}
   void EvacuateEpilogue() { allocated_linear_areas_ = 0; }

   // When inline allocation stepping is active, either because of incremental
@@ -23,6 +23,7 @@
 #include "src/heap/read-only-heap.h"
 #include "src/heap/safepoint.h"
 #include "src/heap/spaces.h"
+#include "src/heap/sweeper.h"
 #include "src/logging/runtime-call-stats-scope.h"
 #include "src/objects/string.h"
 #include "src/utils/utils.h"
@@ -138,17 +139,17 @@ void PagedSpaceBase::TearDown() {
   accounting_stats_.Clear();
 }

-void PagedSpaceBase::RefillFreeList() {
+void PagedSpaceBase::RefillFreeList(Sweeper* sweeper) {
   // Any PagedSpace might invoke RefillFreeList. We filter all but our old
   // generation spaces out.
   DCHECK(identity() == OLD_SPACE || identity() == CODE_SPACE ||
          identity() == MAP_SPACE || identity() == NEW_SPACE);
   MarkCompactCollector* collector = heap()->mark_compact_collector();

   size_t added = 0;

   {
     Page* p = nullptr;
-    while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
+    while ((p = sweeper->GetSweptPageSafe(this)) != nullptr) {
       // We regularly sweep NEVER_ALLOCATE_ON_PAGE pages. We drop the freelist
       // entries here to make them unavailable for allocations.
       if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
@@ -162,6 +163,7 @@ void PagedSpaceBase::RefillFreeList() {
       // during compaction.
       if (is_compaction_space()) {
         DCHECK_NE(this, p->owner());
+        DCHECK_NE(NEW_SPACE, identity());
         PagedSpaceBase* owner = reinterpret_cast<PagedSpaceBase*>(p->owner());
         base::MutexGuard guard(owner->mutex());
         owner->RefineAllocatedBytesAfterSweeping(p);
@@ -282,7 +284,7 @@ bool PagedSpaceBase::ContainsSlow(Address addr) const {
 void PagedSpaceBase::RefineAllocatedBytesAfterSweeping(Page* page) {
   CHECK(page->SweepingDone());
   auto marking_state =
-      heap()->incremental_marking()->non_atomic_marking_state();
+      heap()->mark_compact_collector()->non_atomic_marking_state();
   // The live_byte on the page was accounted in the space allocated
   // bytes counter. After sweeping allocated_bytes() contains the
   // accurate live byte count on the page.
@@ -329,7 +331,13 @@ void PagedSpaceBase::RemovePage(Page* page) {
   if (identity() == NEW_SPACE) {
     page->ReleaseFreeListCategories();
   }
-  DecreaseAllocatedBytes(page->allocated_bytes(), page);
+  // Pages are only removed from new space when they are promoted to old space
+  // during a GC. This happens after sweeping as started and the allocation
+  // counters have been reset.
+  DCHECK_IMPLIES(identity() == NEW_SPACE, Size() == 0);
+  if (identity() != NEW_SPACE) {
+    DecreaseAllocatedBytes(page->allocated_bytes(), page);
+  }
   DecreaseCapacity(page->area_size());
   AccountUncommitted(page->size());
   for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
@@ -662,7 +670,7 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
   if (collector->sweeping_in_progress()) {
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
-    RefillFreeList();
+    RefillFreeList(collector->sweeper());

     // Retry the free list allocation.
     result = TryAllocationFromFreeListBackground(min_size_in_bytes,
@@ -677,7 +685,8 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
         identity(), Sweeper::SweepingMode::kLazyOrConcurrent,
         static_cast<int>(min_size_in_bytes), kMaxPagesToSweep);

-    RefillFreeList();
+    // Keep new space sweeping atomic.
+    RefillFreeList(collector->sweeper());

     if (static_cast<size_t>(max_freed) >= min_size_in_bytes) {
       result = TryAllocationFromFreeListBackground(min_size_in_bytes,
@@ -699,7 +708,7 @@ PagedSpaceBase::RawAllocateBackground(LocalHeap* local_heap,
     collector->DrainSweepingWorklistForSpace(identity());
   }

-  RefillFreeList();
+  RefillFreeList(collector->sweeper());

   // Last try to acquire memory from free list.
   return TryAllocationFromFreeListBackground(min_size_in_bytes,
@@ -985,7 +994,7 @@ bool PagedSpaceBase::RawRefillLabMain(int size_in_bytes,
   if (collector->sweeping_in_progress()) {
     // First try to refill the free-list, concurrent sweeper threads
     // may have freed some objects in the meantime.
-    RefillFreeList();
+    RefillFreeList(collector->sweeper());

     // Retry the free list allocation.
     if (TryAllocationFromFreeListMain(static_cast<size_t>(size_in_bytes),
@@ -1049,7 +1058,7 @@ bool PagedSpaceBase::ContributeToSweepingMain(int required_freed_bytes,
   if (collector->sweeping_in_progress()) {
     collector->sweeper()->ParallelSweepSpace(identity(), sweeping_mode,
                                              required_freed_bytes, max_pages);
-    RefillFreeList();
+    RefillFreeList(collector->sweeper());
    return TryAllocationFromFreeListMain(size_in_bytes, origin);
   }
   return false;
@@ -31,6 +31,7 @@ class HeapObject;
 class Isolate;
 class ObjectVisitor;
 class PagedSpaceBase;
+class Sweeper;

 // -----------------------------------------------------------------------------
 // Heap object iterator in paged spaces.
@@ -211,8 +212,6 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
     accounting_stats_.IncreaseCapacity(bytes);
   }

-  void RefineAllocatedBytesAfterSweeping(Page* page);
-
   Page* InitializePage(MemoryChunk* chunk) override;

   virtual void ReleasePage(Page* page);
@@ -281,7 +280,7 @@ class V8_EXPORT_PRIVATE PagedSpaceBase

   // Refills the free list from the corresponding free list filled by the
   // sweeper.
-  virtual void RefillFreeList();
+  void RefillFreeList(Sweeper* sweeper);

   base::Mutex* mutex() { return &space_mutex_; }

@@ -343,6 +342,8 @@ class V8_EXPORT_PRIVATE PagedSpaceBase
     return !is_compaction_space();
   }

+  void RefineAllocatedBytesAfterSweeping(Page* page);
+
  protected:
   void UpdateInlineAllocationLimit(size_t min_size) override;

@@ -76,10 +76,7 @@ bool Heap::CreateHeapObjects() {
   // Create initial maps.
   if (!CreateInitialMaps()) return false;
   if (FLAG_minor_mc && new_space()) {
-    PagedNewSpace::From(new_space())
-        ->paged_space()
-        ->free_list()
-        ->RepairLists(this);
+    paged_new_space()->paged_space()->free_list()->RepairLists(this);
   }
   CreateApiObjects();

@@ -7,6 +7,7 @@
 #include <memory>
 #include <vector>

+#include "src/base/logging.h"
 #include "src/common/globals.h"
 #include "src/execution/vm-state-inl.h"
 #include "src/heap/base/active-system-pages.h"
@@ -16,28 +17,41 @@
 #include "src/heap/gc-tracer.h"
 #include "src/heap/invalidated-slots-inl.h"
 #include "src/heap/mark-compact-inl.h"
+#include "src/heap/new-spaces.h"
 #include "src/heap/paged-spaces.h"
 #include "src/heap/remembered-set.h"
 #include "src/objects/objects-inl.h"

 namespace v8 {
 namespace internal {

+namespace {
+static const int kInitialLocalPretenuringFeedbackCapacity = 256;
+}  // namespace
+
 class Sweeper::ConcurrentSweeper final {
  public:
-  explicit ConcurrentSweeper(Sweeper* sweeper) : sweeper_(sweeper) {}
+  explicit ConcurrentSweeper(Sweeper* sweeper)
+      : sweeper_(sweeper),
+        local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}

   bool ConcurrentSweepSpace(AllocationSpace identity, JobDelegate* delegate) {
     while (!delegate->ShouldYield()) {
       Page* page = sweeper_->GetSweepingPageSafe(identity);
       if (page == nullptr) return true;
-      sweeper_->ParallelSweepPage(page, identity,
+      sweeper_->ParallelSweepPage(page, identity, &local_pretenuring_feedback_,
                                   SweepingMode::kLazyOrConcurrent);
     }
     return false;
   }

+  Heap::PretenuringFeedbackMap* local_pretenuring_feedback() {
+    return &local_pretenuring_feedback_;
+  }
+
  private:
   Sweeper* const sweeper_;
+  Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
 };

 class Sweeper::SweeperJob final : public JobTask {
@@ -80,8 +94,7 @@ class Sweeper::SweeperJob final : public JobTask {
     ConcurrentSweeper& sweeper = (*concurrent_sweepers_)[offset];
     for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
       const AllocationSpace space_id = static_cast<AllocationSpace>(
-          FIRST_GROWABLE_PAGED_SPACE +
-          ((i + offset) % kNumberOfSweepingSpaces));
+          FIRST_SWEEPABLE_SPACE + ((i + offset) % kNumberOfSweepingSpaces));
       DCHECK(IsValidSweepingSpace(space_id));
       if (!sweeper.ConcurrentSweepSpace(space_id, delegate)) return;
     }
@@ -96,9 +109,13 @@ Sweeper::Sweeper(Heap* heap, NonAtomicMarkingState* marking_state)
     : heap_(heap),
       marking_state_(marking_state),
       sweeping_in_progress_(false),
-      should_reduce_memory_(false) {}
+      should_reduce_memory_(false),
+      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity) {}

-Sweeper::~Sweeper() { DCHECK(concurrent_sweepers_.empty()); }
+Sweeper::~Sweeper() {
+  DCHECK(concurrent_sweepers_.empty());
+  DCHECK(local_pretenuring_feedback_.empty());
+}

 Sweeper::PauseScope::PauseScope(Sweeper* sweeper) : sweeper_(sweeper) {
   if (!sweeper_->sweeping_in_progress()) return;
@@ -143,11 +160,10 @@ void Sweeper::TearDown() {
 }

 void Sweeper::StartSweeping() {
+  DCHECK(local_pretenuring_feedback_.empty());
   sweeping_in_progress_ = true;
   should_reduce_memory_ = heap_->ShouldReduceMemory();
-  NonAtomicMarkingState* marking_state =
-      heap_->mark_compact_collector()->non_atomic_marking_state();
-  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
+  ForAllSweepingSpaces([this](AllocationSpace space) {
     // Sorting is done in order to make compaction more efficient: by sweeping
     // pages with the most free bytes first, we make it more likely that when
     // evacuating a page, already swept pages will have enough free bytes to
@@ -158,7 +174,7 @@ void Sweeper::StartSweeping() {
     int space_index = GetSweepSpaceIndex(space);
     std::sort(
         sweeping_list_[space_index].begin(), sweeping_list_[space_index].end(),
-        [marking_state](Page* a, Page* b) {
+        [marking_state = marking_state_](Page* a, Page* b) {
          return marking_state->live_bytes(a) > marking_state->live_bytes(b);
         });
   });
@@ -198,13 +214,13 @@ Page* Sweeper::GetSweptPageSafe(PagedSpaceBase* space) {
   return nullptr;
 }

-void Sweeper::EnsureCompleted() {
+void Sweeper::EnsureCompleted(SweepingMode sweeping_mode) {
   if (!sweeping_in_progress_) return;

   // If sweeping is not completed or not running at all, we try to complete it
   // here.
-  ForAllSweepingSpaces([this](AllocationSpace space) {
-    ParallelSweepSpace(space, SweepingMode::kLazyOrConcurrent, 0);
+  ForAllSweepingSpaces([this, sweeping_mode](AllocationSpace space) {
+    ParallelSweepSpace(space, sweeping_mode, 0);
   });

   if (job_handle_ && job_handle_->IsValid()) job_handle_->Join();
@@ -213,7 +229,14 @@ void Sweeper::EnsureCompleted() {
     CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
   });

+  heap_->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
+  for (ConcurrentSweeper& concurrent_sweeper : concurrent_sweepers_) {
+    heap_->MergeAllocationSitePretenuringFeedback(
+        *concurrent_sweeper.local_pretenuring_feedback());
+  }
+  local_pretenuring_feedback_.clear();
   concurrent_sweepers_.clear();
+
   sweeping_in_progress_ = false;
 }

@@ -322,13 +345,17 @@ void Sweeper::ClearMarkBitsAndHandleLivenessStatistics(Page* page,
   DCHECK_EQ(live_bytes, page->allocated_bytes());
 }

-int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
-                      SweepingMode sweeping_mode,
-                      const base::MutexGuard& page_guard) {
+int Sweeper::RawSweep(
+    Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
+    SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
+    Heap::PretenuringFeedbackMap* local_pretenuring_feedback) {
   Space* space = p->owner();
   DCHECK_NOT_NULL(space);
   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
-         space->identity() == MAP_SPACE);
+         space->identity() == MAP_SPACE ||
+         (space->identity() == NEW_SPACE && FLAG_minor_mc));
+  DCHECK_IMPLIES(space->identity() == NEW_SPACE,
+                 sweeping_mode == SweepingMode::kEagerDuringGC);
   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());

   // Phase 1: Prepare the page for sweeping.
@@ -410,6 +437,10 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
       live_bytes += size;
       free_start = free_end + size;

+      if (p->InYoungGeneration()) {
+        heap_->UpdateAllocationSite(map, object, local_pretenuring_feedback);
+      }
+
       if (active_system_pages_after_sweeping) {
         active_system_pages_after_sweeping->Add(
             free_end - p->address(), free_start - p->address(),
@@ -456,7 +487,9 @@ int Sweeper::RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
 size_t Sweeper::ConcurrentSweepingPageCount() {
   base::MutexGuard guard(&mutex_);
   return sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)].size() +
-         sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size();
+         sweeping_list_[GetSweepSpaceIndex(MAP_SPACE)].size() +
+         (FLAG_minor_mc ? sweeping_list_[GetSweepSpaceIndex(NEW_SPACE)].size()
+                        : 0);
 }

 int Sweeper::ParallelSweepSpace(AllocationSpace identity,
@@ -466,7 +499,8 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
   int pages_freed = 0;
   Page* page = nullptr;
   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
-    int freed = ParallelSweepPage(page, identity, sweeping_mode);
+    int freed = ParallelSweepPage(page, identity, &local_pretenuring_feedback_,
+                                  sweeping_mode);
     ++pages_freed;
     if (page->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
       // Free list of a never-allocate page will be dropped later on.
@@ -481,8 +515,10 @@ int Sweeper::ParallelSweepSpace(AllocationSpace identity,
   return max_freed;
 }

-int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
-                               SweepingMode sweeping_mode) {
+int Sweeper::ParallelSweepPage(
+    Page* page, AllocationSpace identity,
+    Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
+    SweepingMode sweeping_mode) {
   DCHECK(IsValidSweepingSpace(identity));

   // The Scavenger may add already swept pages back.
@@ -503,7 +539,8 @@ int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity,
     const FreeSpaceTreatmentMode free_space_treatment_mode =
         Heap::ShouldZapGarbage() ? FreeSpaceTreatmentMode::kZapFreeSpace
                                  : FreeSpaceTreatmentMode::kIgnoreFreeSpace;
-    max_freed = RawSweep(page, free_space_treatment_mode, sweeping_mode, guard);
+    max_freed = RawSweep(page, free_space_treatment_mode, sweeping_mode, guard,
+                         local_pretenuring_feedback);
     DCHECK(page->SweepingDone());
   }

@@ -522,7 +559,8 @@ void Sweeper::EnsurePageIsSwept(Page* page) {
   if (IsValidSweepingSpace(space)) {
     if (TryRemoveSweepingPageSafe(space, page)) {
       // Page was successfully removed and can now be swept.
-      ParallelSweepPage(page, space, SweepingMode::kLazyOrConcurrent);
+      ParallelSweepPage(page, space, &local_pretenuring_feedback_,
+                        SweepingMode::kLazyOrConcurrent);
     } else {
       // Some sweeper task already took ownership of that page, wait until
       // sweeping is finished.
@@ -578,8 +616,14 @@ void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
   });
 #endif  // DEBUG
   page->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
-  heap_->paged_space(space)->IncreaseAllocatedBytes(
-      marking_state_->live_bytes(page), page);
+  PagedSpaceBase* paged_space;
+  if (space == NEW_SPACE) {
+    DCHECK(FLAG_minor_mc);
+    paged_space = heap_->paged_new_space()->paged_space();
+  } else {
+    paged_space = heap_->paged_space(space);
+  }
+  paged_space->IncreaseAllocatedBytes(marking_state_->live_bytes(page), page);
 }

 Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
@@ -11,6 +11,8 @@
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/semaphore.h"
 #include "src/common/globals.h"
+#include "src/flags/flags.h"
+#include "src/heap/heap.h"
 #include "src/heap/slot-set.h"
 #include "src/tasks/cancelable-task.h"

@@ -85,20 +87,24 @@ class Sweeper {

   int ParallelSweepSpace(AllocationSpace identity, SweepingMode sweeping_mode,
                          int required_freed_bytes, int max_pages = 0);
-  int ParallelSweepPage(Page* page, AllocationSpace identity,
-                        SweepingMode sweeping_mode);
+  int ParallelSweepPage(
+      Page* page, AllocationSpace identity,
+      Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
+      SweepingMode sweeping_mode);

   void EnsurePageIsSwept(Page* page);

   int RawSweep(Page* p, FreeSpaceTreatmentMode free_space_treatment_mode,
-               SweepingMode sweeping_mode, const base::MutexGuard& page_guard);
+               SweepingMode sweeping_mode, const base::MutexGuard& page_guard,
+               Heap::PretenuringFeedbackMap* local_pretenuring_feedback);

   // After calling this function sweeping is considered to be in progress
   // and the main thread can sweep lazily, but the background sweeper tasks
   // are not running yet.
   void StartSweeping();
   V8_EXPORT_PRIVATE void StartSweeperTasks();
-  void EnsureCompleted();
+  void EnsureCompleted(
+      SweepingMode sweeping_mode = SweepingMode::kLazyOrConcurrent);
   void DrainSweepingWorklistForSpace(AllocationSpace space);
   bool AreSweeperTasksRunning();

@@ -107,16 +113,21 @@ class Sweeper {

   Page* GetSweptPageSafe(PagedSpaceBase* space);

+  NonAtomicMarkingState* marking_state() const { return marking_state_; }
+
  private:
   class ConcurrentSweeper;
   class SweeperJob;

   static const int kNumberOfSweepingSpaces =
-      LAST_GROWABLE_PAGED_SPACE - FIRST_GROWABLE_PAGED_SPACE + 1;
+      LAST_SWEEPABLE_SPACE - FIRST_SWEEPABLE_SPACE + 1;
   static constexpr int kMaxSweeperTasks = 3;

   template <typename Callback>
   void ForAllSweepingSpaces(Callback callback) const {
+    if (FLAG_minor_mc) {
+      callback(NEW_SPACE);
+    }
     callback(OLD_SPACE);
     callback(CODE_SPACE);
     callback(MAP_SPACE);
@@ -165,13 +176,12 @@ class Sweeper {
   void PrepareToBeSweptPage(AllocationSpace space, Page* page);

   static bool IsValidSweepingSpace(AllocationSpace space) {
-    return space >= FIRST_GROWABLE_PAGED_SPACE &&
-           space <= LAST_GROWABLE_PAGED_SPACE;
+    return space >= FIRST_SWEEPABLE_SPACE && space <= LAST_SWEEPABLE_SPACE;
   }

   static int GetSweepSpaceIndex(AllocationSpace space) {
     DCHECK(IsValidSweepingSpace(space));
-    return space - FIRST_GROWABLE_PAGED_SPACE;
+    return space - FIRST_SWEEPABLE_SPACE;
   }

   int NumberOfConcurrentSweepers() const;
@@ -188,6 +198,7 @@ class Sweeper {
   // path checks this flag to see whether it could support concurrent sweeping.
   std::atomic<bool> sweeping_in_progress_;
   bool should_reduce_memory_;
+  Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
 };

 }  // namespace internal
@@ -577,6 +577,7 @@
   F(MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
   F(MC_EVACUATE_UPDATE_POINTERS_WEAK) \
   F(MC_FINISH_SWEEP_NEW_LO) \
+  F(MC_FINISH_SWEEP_NEW) \
   F(MC_FINISH_SWEEP_ARRAY_BUFFERS) \
   F(MC_MARK_CLIENT_HEAPS) \
   F(MC_MARK_EMBEDDER_PROLOGUE) \
@@ -593,6 +594,7 @@
   F(MC_SWEEP_CODE_LO) \
   F(MC_SWEEP_LO) \
   F(MC_SWEEP_MAP) \
+  F(MC_SWEEP_NEW) \
   F(MC_SWEEP_OLD) \
   F(MINOR_MARK_COMPACTOR) \
   F(MINOR_MC) \
@@ -610,10 +612,10 @@
   F(MINOR_MC_EVACUATE_UPDATE_POINTERS) \
   F(MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL) \
   F(MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS) \
-  F(MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS) \
   F(MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK) \
   F(MINOR_MC_FINISH) \
   F(MINOR_MC_FINISH_SWEEP_ARRAY_BUFFERS) \
+  F(MINOR_MC_FINISH_SWEEP_NEW) \
   F(MINOR_MC_MARK) \
   F(MINOR_MC_MARK_GLOBAL_HANDLES) \
   F(MINOR_MC_MARK_PARALLEL) \
@@ -622,6 +624,8 @@
   F(MINOR_MC_MARK_WEAK) \
   F(MINOR_MC_MARKING_DEQUE) \
   F(MINOR_MC_RESET_LIVENESS) \
+  F(MINOR_MC_SWEEP) \
+  F(MINOR_MC_SWEEP_NEW) \
   F(SAFEPOINT) \
   F(SCAVENGER) \
   F(SCAVENGER_COMPLETE_SWEEP_ARRAY_BUFFERS) \