[heap] Don't start a job for MinorMC evacuation
With concurrent promoted page iteration, the parallel evacuation phase merely
pushes the pages to the sweeper. Therefore, the work is minimal and there is
practically no justification to start a parallel job for it.

Bug: v8:12612
Change-Id: I585d9e23e07b70fa780239bd26843530c6ca69a1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/4203376
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#85550}
parent cfd4728fb2
commit 24b1878832
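The core of the change, visible in the second hunk below, is to choose a promoted-page handler once per GC and start a parallel job only when that handler actually queued evacuation work. A minimal standalone C++ sketch of that shape, with simplified types and names (this is not V8 code):

#include <functional>
#include <iostream>
#include <vector>

struct Page {
  long live_bytes;
};

int main() {
  const bool should_reduce_memory = false;  // stands in for heap()->ShouldReduceMemory()
  std::vector<Page*> evacuation_items;      // pages for the parallel job
  long moved_bytes = 0;                     // accounted directly instead

  // Pick the handler once, outside the per-page loop.
  std::function<void(Page*, long)> handle_promoted_page;
  if (should_reduce_memory) {
    handle_promoted_page = [&evacuation_items](Page* page, long) {
      evacuation_items.push_back(page);  // iterate in the parallel job
    };
  } else {
    handle_promoted_page = [&moved_bytes](Page* /*page*/, long live_bytes) {
      // "Push the page to the sweeper" in the real code; here we only
      // accumulate the bytes that would otherwise be reported by tasks.
      moved_bytes += live_bytes;
    };
  }

  Page pages[] = {{1024}, {2048}};
  for (Page& p : pages) handle_promoted_page(&p, p.live_bytes);

  // Only start tasks when there is real evacuation work queued.
  if (!evacuation_items.empty()) {
    std::cout << "start parallel job for " << evacuation_items.size()
              << " pages\n";
  } else {
    std::cout << "no job started; accounted " << moved_bytes
              << " moved bytes\n";
  }
}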
@@ -6658,17 +6658,12 @@ bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
   NonAtomicMarkingState* marking_state = heap_->non_atomic_marking_state();
   *live_bytes = marking_state->live_bytes(chunk);
   DCHECK_EQ(kPageNewToOld, ComputeEvacuationMode(chunk));
-  if (heap()->ShouldReduceMemory()) {
-    // For memory reducing GCs, iterate pages immediately to avoid delaying
-    // array buffer sweeping.
-    LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,
-                                               &new_to_old_page_visitor_);
-    if (!chunk->IsLargePage() && heap()->ShouldZapGarbage()) {
-      heap_->minor_mark_compact_collector()->MakeIterable(
-          static_cast<Page*>(chunk), FreeSpaceTreatmentMode::kZapFreeSpace);
-    }
-  } else {
-    sweeper_->AddPromotedPageForIteration(chunk);
-  }
+  DCHECK(heap()->ShouldReduceMemory());
+  LiveObjectVisitor::VisitBlackObjectsNoFail(chunk, marking_state,
+                                             &new_to_old_page_visitor_);
+  if (!chunk->IsLargePage() && heap()->ShouldZapGarbage()) {
+    heap_->minor_mark_compact_collector()->MakeIterable(
+        static_cast<Page*>(chunk), FreeSpaceTreatmentMode::kZapFreeSpace);
+  }
   new_to_old_page_visitor_.account_moved_bytes(
       marking_state->live_bytes(chunk));
@@ -6680,10 +6675,30 @@ bool YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
 
 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
   std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
-  intptr_t live_bytes = 0;
 
+  intptr_t live_bytes = 0;
+  intptr_t moved_bytes = 0;
+
+  std::function<void(MemoryChunk*, intptr_t)> handle_promoted_page;
+  if (heap()->ShouldReduceMemory()) {
+    handle_promoted_page = [&evacuation_items](MemoryChunk* chunk,
+                                               intptr_t live_bytes) {
+      // For memory reducing GCs, parallel iterate pages immediately to avoid
+      // delaying array buffer sweeping.
+      evacuation_items.emplace_back(ParallelWorkItem{}, chunk);
+    };
+  } else {
+    handle_promoted_page = [&moved_bytes, this](MemoryChunk* chunk,
+                                                intptr_t live_bytes) {
+      sweeper()->AddPromotedPageForIteration(chunk);
+      moved_bytes += live_bytes;
+    };
+  }
+
+  const NonAtomicMarkingState* marking_state = non_atomic_marking_state();
+  size_t promoted_page_count = 0;
   for (Page* page : new_space_evacuation_pages_) {
-    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
+    intptr_t live_bytes_on_page = marking_state->live_bytes(page);
     DCHECK_LT(0, live_bytes_on_page);
     live_bytes += live_bytes_on_page;
     if (ShouldMovePage(page, live_bytes_on_page, page->wasted_memory(),
@@ -6692,36 +6707,46 @@ void MinorMarkCompactCollector::EvacuatePagesInParallel() {
                            ? PromoteUnusablePages::kYes
                            : PromoteUnusablePages::kNo)) {
       EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
-      evacuation_items.emplace_back(ParallelWorkItem{}, page);
+      promoted_page_count++;
+      handle_promoted_page(page, live_bytes_on_page);
     } else {
       // Page is not promoted. Sweep it instead.
       sweeper()->AddNewSpacePage(page);
     }
   }
 
   DCHECK_EQ(0, promoted_large_pages_.size());
   // Promote young generation large objects.
   for (auto it = heap()->new_lo_space()->begin();
        it != heap()->new_lo_space()->end();) {
     LargePage* current = *it;
     it++;
     HeapObject object = current->GetObject();
-    if (non_atomic_marking_state()->IsBlack(object)) {
+    if (marking_state->IsBlack(object)) {
       heap_->lo_space()->PromoteNewLargeObject(current);
       current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
       promoted_large_pages_.push_back(current);
-      evacuation_items.emplace_back(ParallelWorkItem{}, current);
+      handle_promoted_page(current, marking_state->live_bytes(current));
     }
     heap()->new_lo_space()->set_objects_size(0);
   }
-  if (evacuation_items.empty()) return;
 
-  const auto pages_count = evacuation_items.size();
-  const auto wanted_num_tasks =
-      CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
-          heap(), std::move(evacuation_items));
+  size_t wanted_num_tasks = 0;
+  DCHECK_IMPLIES(!heap()->ShouldReduceMemory(), evacuation_items.empty());
+  DCHECK_IMPLIES(heap()->ShouldReduceMemory(), moved_bytes == 0);
+  if (!evacuation_items.empty()) {
+    wanted_num_tasks =
+        CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
+            heap(), std::move(evacuation_items));
+  } else {
+    heap()->IncrementPromotedObjectsSize(moved_bytes);
+    heap()->IncrementYoungSurvivorsCounter(moved_bytes);
+  }
 
   if (v8_flags.trace_evacuation) {
-    TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
+    TraceEvacuation(isolate(),
+                    promoted_page_count + promoted_large_pages_.size(),
+                    wanted_num_tasks, live_bytes, 0);
   }
 }
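When no evacuation tasks are started, the survivor-byte accounting that those tasks would normally perform is instead done directly in the else branch above, via IncrementPromotedObjectsSize and IncrementYoungSurvivorsCounter. A standalone sketch of that fallback with hypothetical names (not V8 code):

#include <cstdint>
#include <iostream>

struct HeapCounters {
  int64_t promoted_objects_size = 0;
  int64_t young_survivors = 0;
};

// If no task ran, credit the accumulated moved bytes to the heap counters
// here; otherwise the tasks already reported them.
void FinishEvacuation(HeapCounters& heap, bool started_tasks,
                      int64_t moved_bytes) {
  if (!started_tasks) {
    heap.promoted_objects_size += moved_bytes;
    heap.young_survivors += moved_bytes;
  }
}

int main() {
  HeapCounters heap;
  FinishEvacuation(heap, /*started_tasks=*/false, /*moved_bytes=*/4096);
  std::cout << heap.promoted_objects_size << "\n";  // prints 4096
}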
@@ -283,7 +283,6 @@ class CollectorBase {
 
  private:
   std::vector<Page*> new_space_evacuation_pages_;
-  std::vector<LargePage*> promoted_large_pages_;
 
  protected:
   using ResizeNewSpaceMode = Heap::ResizeNewSpaceMode;
@@ -1084,6 +1084,7 @@ void Sweeper::AddNewSpacePage(Page* page) {
 }
 
 void Sweeper::AddPromotedPageForIteration(MemoryChunk* chunk) {
   DCHECK(heap_->IsMainThread());
+  DCHECK(!heap_->ShouldReduceMemory());
   DCHECK(chunk->owner_identity() == OLD_SPACE ||
          chunk->owner_identity() == LO_SPACE);
@@ -1102,7 +1103,8 @@ void Sweeper::AddPromotedPageForIteration(MemoryChunk* chunk) {
   DCHECK_EQ(Page::ConcurrentSweepingState::kDone,
             chunk->concurrent_sweeping_state());
   chunk->set_concurrent_sweeping_state(Page::ConcurrentSweepingState::kPending);
-  base::MutexGuard guard(&promoted_pages_iteration_mutex_);
+  // This method is called only from the main thread while sweeping tasks have
+  // not yet started, thus a mutex is not needed.
   sweeping_list_for_promoted_page_iteration_.push_back(chunk);
   promoted_pages_for_iteration_count_++;
 }
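The dropped MutexGuard is replaced by an invariant rather than by another lock: the promoted-page list is only ever appended to from the main thread, before any sweeping task starts. A standalone sketch of asserting such a single-owner invariant instead of locking (simplified types, not V8 code):

#include <cassert>
#include <thread>
#include <vector>

class PromotedPageQueue {
 public:
  explicit PromotedPageQueue(std::thread::id owner) : owner_(owner) {}

  // Called only from the owning (main) thread, before any worker exists,
  // so no mutex is needed; the assert documents and checks the invariant.
  void Push(int page_id) {
    assert(std::this_thread::get_id() == owner_);
    pages_.push_back(page_id);
  }

 private:
  std::thread::id owner_;
  std::vector<int> pages_;
};

int main() {
  PromotedPageQueue queue(std::this_thread::get_id());
  queue.Push(1);  // fine: we are on the owning thread
}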