[heap] Process PageMarkingItems on incremental marking start

This CL adds processing of the OLD_TO_NEW RememberedSet
during minor incremental marking start.

Bug: v8:13012
Change-Id: I4fd051087d46e1b8a22b735bf0cae6d2da2ecb5b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885875
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Cr-Commit-Position: refs/heads/main@{#83278}
Authored by Leon Bettscheider on 2022-09-16 14:42:47 +00:00, committed by V8 LUCI CQ
parent 17b903a22e
commit f81d0ca6e5

4 changed files with 204 additions and 140 deletions
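
Note: the diff relies on V8's parallel work-item machinery. The job receives a vector of items; each worker draws a starting index from an IndexGenerator and then claims consecutive items via ParallelWorkItem::TryAcquire() until it reaches one another worker already owns. The following is a self-contained model of that scheme with simplified stand-in types, not the actual V8 classes; the sequential index handout is an assumption (V8's IndexGenerator staggers indices to reduce contention).

// Illustrative stand-ins for V8's ParallelWorkItem / IndexGenerator; the
// claiming loop mirrors ProcessMarkingItems() in the diff below.
#include <atomic>
#include <cstdio>
#include <optional>
#include <thread>
#include <vector>

class WorkItem {
 public:
  // First caller to flip the flag owns the item (V8 also uses relaxed order).
  bool TryAcquire() {
    return !acquired_.exchange(true, std::memory_order_relaxed);
  }
  void Process(int worker) { std::printf("worker %d took an item\n", worker); }

 private:
  std::atomic<bool> acquired_{false};
};

class IndexGenerator {
 public:
  explicit IndexGenerator(size_t size) : size_(size) {}
  // Sequential handout for simplicity; V8 staggers these indices.
  std::optional<size_t> GetNext() {
    size_t i = next_.fetch_add(1, std::memory_order_relaxed);
    return i < size_ ? std::optional<size_t>(i) : std::nullopt;
  }

 private:
  std::atomic<size_t> next_{0};
  const size_t size_;
};

int main() {
  std::vector<WorkItem> items(8);
  IndexGenerator generator(items.size());
  std::atomic<size_t> remaining{items.size()};

  auto worker = [&](int id) {
    while (remaining.load(std::memory_order_relaxed) > 0) {
      std::optional<size_t> start = generator.GetNext();
      if (!start) return;
      for (size_t i = *start; i < items.size(); ++i) {
        if (!items[i].TryAcquire()) break;  // another worker got here first
        items[i].Process(id);
        if (remaining.fetch_sub(1, std::memory_order_relaxed) <= 1) return;
      }
    }
  };
  std::thread t1(worker, 1), t2(worker, 2);
  t1.join();
  t2.join();
}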

src/heap/incremental-marking.cc

@@ -266,7 +266,21 @@ void IncrementalMarking::MarkRoots() {
     heap()->isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
         &visitor);
-    // TODO(v8:13012): Do PageMarkingItem processing.
+    std::vector<PageMarkingItem> marking_items;
+    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+        heap_, [&marking_items](MemoryChunk* chunk) {
+          marking_items.emplace_back(chunk);
+        });
+    V8::GetCurrentPlatform()
+        ->CreateJob(
+            v8::TaskPriority::kUserBlocking,
+            std::make_unique<YoungGenerationMarkingJob>(
+                heap_->isolate(), minor_collector_,
+                minor_collector_->marking_worklists(),
+                std::move(marking_items), YoungMarkingJobType::kIncremental))
+        ->Join();
   }
 }
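
MarkRoots() now schedules the job at kUserBlocking priority and immediately Join()s it, so the joining thread also contributes work and the OLD_TO_NEW scan completes before incremental marking proceeds. For reference, this is the v8::JobTask contract from include/v8-platform.h that the job implements; the toy task below is made up for illustration and just drains a counter.

#include <atomic>
#include <cstddef>
#include <v8-platform.h>

// Toy JobTask (not V8 heap code): shows the two overrides the scheduler uses.
class CounterJob : public v8::JobTask {
 public:
  explicit CounterJob(size_t items) : remaining_(items) {}

  void Run(v8::JobDelegate* delegate) override {
    // Work until done or until the scheduler asks this worker to yield.
    while (!delegate->ShouldYield()) {
      size_t n = remaining_.load(std::memory_order_relaxed);
      if (n == 0) return;
      remaining_.compare_exchange_weak(n, n - 1, std::memory_order_relaxed);
    }
  }

  // Re-queried as work drains; returning 0 lets the job complete.
  size_t GetMaxConcurrency(size_t worker_count) const override {
    return remaining_.load(std::memory_order_relaxed);
  }

 private:
  std::atomic<size_t> remaining_;
};

// As in MarkRoots(): the caller participates in the work and blocks until done.
//   platform->CreateJob(v8::TaskPriority::kUserBlocking,
//                       std::make_unique<CounterJob>(100))->Join();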

src/heap/mark-compact-inl.h

@@ -9,6 +9,7 @@
 #include "src/codegen/assembler-inl.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/incremental-marking.h"
+#include "src/heap/index-generator.h"
 #include "src/heap/mark-compact.h"
 #include "src/heap/marking-worklist-inl.h"
 #include "src/heap/marking-worklist.h"
@@ -283,6 +284,65 @@ typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
 Isolate* CollectorBase::isolate() { return heap()->isolate(); }
 
+class YoungGenerationMarkingTask;
+
+class PageMarkingItem : public ParallelWorkItem {
+ public:
+  explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
+  ~PageMarkingItem() = default;
+
+  void Process(YoungGenerationMarkingTask* task);
+
+ private:
+  inline Heap* heap() { return chunk_->heap(); }
+
+  void MarkUntypedPointers(YoungGenerationMarkingTask* task);
+
+  void MarkTypedPointers(YoungGenerationMarkingTask* task);
+
+  template <typename TSlot>
+  V8_INLINE SlotCallbackResult
+  CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot);
+
+  MemoryChunk* chunk_;
+};
+
+enum class YoungMarkingJobType { kAtomic, kIncremental };
+
+class YoungGenerationMarkingJob : public v8::JobTask {
+ public:
+  YoungGenerationMarkingJob(Isolate* isolate,
+                            MinorMarkCompactCollector* collector,
+                            MarkingWorklists* global_worklists,
+                            std::vector<PageMarkingItem> marking_items,
+                            YoungMarkingJobType young_marking_job_type)
+      : isolate_(isolate),
+        collector_(collector),
+        global_worklists_(global_worklists),
+        marking_items_(std::move(marking_items)),
+        remaining_marking_items_(marking_items_.size()),
+        generator_(marking_items_.size()),
+        young_marking_job_type_(young_marking_job_type) {}
+
+  void Run(JobDelegate* delegate) override;
+  size_t GetMaxConcurrency(size_t worker_count) const override;
+
+  bool incremental() const {
+    return young_marking_job_type_ == YoungMarkingJobType::kIncremental;
+  }
+
+ private:
+  void ProcessItems(JobDelegate* delegate);
+  void ProcessMarkingItems(YoungGenerationMarkingTask* task);
+
+  Isolate* isolate_;
+  MinorMarkCompactCollector* collector_;
+  MarkingWorklists* global_worklists_;
+  std::vector<PageMarkingItem> marking_items_;
+  std::atomic_size_t remaining_marking_items_{0};
+  IndexGenerator generator_;
+  YoungMarkingJobType young_marking_job_type_;
+};
+
 }  // namespace internal
 }  // namespace v8
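
One detail of the constructor worth flagging: remaining_marking_items_(marking_items_.size()) reads a member that is itself initialized in the same list. This is well defined only because members are initialized in declaration order and marking_items_ is declared first. A minimal illustration with generic types, not V8 code:

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

struct Job {
  explicit Job(std::vector<int> items)
      : items_(std::move(items)),     // runs first: declared first
        remaining_(items_.size()) {}  // safe: items_ is already constructed

  std::vector<int> items_;  // declaration order, not initializer-list order,
  size_t remaining_;        // determines construction order
};

int main() {
  Job job({1, 2, 3});
  assert(job.remaining_ == 3);
}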

src/heap/mark-compact.cc

@@ -6239,7 +6239,6 @@ MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
 class PageMarkingItem;
 class RootMarkingItem;
-class YoungGenerationMarkingTask;
 
 class YoungGenerationMarkingTask {
  public:
@@ -6269,6 +6268,8 @@ class YoungGenerationMarkingTask {
     }
   }
 
+  void PublishMarkingWorklist() { marking_worklists_local_->Publish(); }
+
   MarkingWorklists::Local* marking_worklists_local() {
     return marking_worklists_local_.get();
   }
@@ -6279,155 +6280,139 @@ class YoungGenerationMarkingTask {
   YoungGenerationMainMarkingVisitor visitor_;
 };
 
-class PageMarkingItem : public ParallelWorkItem {
- public:
-  explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
-  ~PageMarkingItem() = default;
-
-  void Process(YoungGenerationMarkingTask* task) {
-    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
-                 "PageMarkingItem::Process");
-    base::MutexGuard guard(chunk_->mutex());
-    MarkUntypedPointers(task);
-    MarkTypedPointers(task);
-  }
-
- private:
-  inline Heap* heap() { return chunk_->heap(); }
-
-  void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
-    InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
-        chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
-    RememberedSet<OLD_TO_NEW>::Iterate(
-        chunk_,
-        [this, task, &filter](MaybeObjectSlot slot) {
-          if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
-          return CheckAndMarkObject(task, slot);
-        },
-        SlotSet::FREE_EMPTY_BUCKETS);
-  }
-
-  void MarkTypedPointers(YoungGenerationMarkingTask* task) {
-    RememberedSet<OLD_TO_NEW>::IterateTyped(
-        chunk_, [=](SlotType slot_type, Address slot) {
-          return UpdateTypedSlotHelper::UpdateTypedSlot(
-              heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
-                return CheckAndMarkObject(task, slot);
-              });
-        });
-  }
-
-  template <typename TSlot>
-  V8_INLINE SlotCallbackResult
-  CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
-    static_assert(
-        std::is_same<TSlot, FullMaybeObjectSlot>::value ||
-            std::is_same<TSlot, MaybeObjectSlot>::value,
-        "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
-    MaybeObject object = *slot;
-    if (Heap::InYoungGeneration(object)) {
-      // Marking happens before flipping the young generation, so the object
-      // has to be in a to page.
-      DCHECK(Heap::InToPage(object));
-      HeapObject heap_object;
-      bool success = object.GetHeapObject(&heap_object);
-      USE(success);
-      DCHECK(success);
-      task->MarkObject(heap_object);
-      return KEEP_SLOT;
-    }
-    return REMOVE_SLOT;
-  }
-
-  MemoryChunk* chunk_;
-};
-
-class YoungGenerationMarkingJob : public v8::JobTask {
- public:
-  YoungGenerationMarkingJob(Isolate* isolate,
-                            MinorMarkCompactCollector* collector,
-                            MarkingWorklists* global_worklists,
-                            std::vector<PageMarkingItem> marking_items)
-      : isolate_(isolate),
-        collector_(collector),
-        global_worklists_(global_worklists),
-        marking_items_(std::move(marking_items)),
-        remaining_marking_items_(marking_items_.size()),
-        generator_(marking_items_.size()) {}
-
-  void Run(JobDelegate* delegate) override {
-    if (delegate->IsJoiningThread()) {
-      TRACE_GC(collector_->heap()->tracer(),
-               GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
-      ProcessItems(delegate);
-    } else {
-      TRACE_GC_EPOCH(collector_->heap()->tracer(),
-                     GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
-                     ThreadKind::kBackground);
-      ProcessItems(delegate);
-    }
-  }
-
-  size_t GetMaxConcurrency(size_t worker_count) const override {
-    // Pages are not private to markers but we can still use them to estimate
-    // the amount of marking that is required.
-    const int kPagesPerTask = 2;
-    size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
-    size_t num_tasks = std::max(
-        (items + 1) / kPagesPerTask,
-        global_worklists_->shared()->Size() +
-            global_worklists_->on_hold()
-                ->Size());  // TODO(v8:13012): If this is used with concurrent
-                            // marking, we need to remove on_hold() here.
-    if (!v8_flags.parallel_marking) {
-      num_tasks = std::min<size_t>(1, num_tasks);
-    }
-    return std::min<size_t>(num_tasks,
-                            MinorMarkCompactCollector::kMaxParallelTasks);
-  }
-
- private:
-  void ProcessItems(JobDelegate* delegate) {
-    double marking_time = 0.0;
-    {
-      TimedScope scope(&marking_time);
-      YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
-      ProcessMarkingItems(&task);
-      task.EmptyMarkingWorklist();
-    }
-    if (v8_flags.trace_minor_mc_parallel_marking) {
-      PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
-                   static_cast<void*>(this), marking_time);
-    }
-  }
-
-  void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
-    while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
-      base::Optional<size_t> index = generator_.GetNext();
-      if (!index) return;
-      for (size_t i = *index; i < marking_items_.size(); ++i) {
-        auto& work_item = marking_items_[i];
-        if (!work_item.TryAcquire()) break;
-        work_item.Process(task);
-        task->EmptyMarkingWorklist();
-        if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
-            1) {
-          return;
-        }
-      }
-    }
-  }
-
-  Isolate* isolate_;
-  MinorMarkCompactCollector* collector_;
-  MarkingWorklists* global_worklists_;
-  std::vector<PageMarkingItem> marking_items_;
-  std::atomic_size_t remaining_marking_items_{0};
-  IndexGenerator generator_;
-};
+void PageMarkingItem::Process(YoungGenerationMarkingTask* task) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "PageMarkingItem::Process");
+  base::MutexGuard guard(chunk_->mutex());
+  MarkUntypedPointers(task);
+  MarkTypedPointers(task);
+}
+
+void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) {
+  InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
+      chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
+  RememberedSet<OLD_TO_NEW>::Iterate(
+      chunk_,
+      [this, task, &filter](MaybeObjectSlot slot) {
+        if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
+        return CheckAndMarkObject(task, slot);
+      },
+      SlotSet::FREE_EMPTY_BUCKETS);
+}
+
+void PageMarkingItem::MarkTypedPointers(YoungGenerationMarkingTask* task) {
+  RememberedSet<OLD_TO_NEW>::IterateTyped(
+      chunk_, [=](SlotType slot_type, Address slot) {
+        return UpdateTypedSlotHelper::UpdateTypedSlot(
+            heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
+              return CheckAndMarkObject(task, slot);
+            });
+      });
+}
+
+template <typename TSlot>
+V8_INLINE SlotCallbackResult PageMarkingItem::CheckAndMarkObject(
+    YoungGenerationMarkingTask* task, TSlot slot) {
+  static_assert(
+      std::is_same<TSlot, FullMaybeObjectSlot>::value ||
+          std::is_same<TSlot, MaybeObjectSlot>::value,
+      "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
+  MaybeObject object = *slot;
+  if (Heap::InYoungGeneration(object)) {
+    // Marking happens before flipping the young generation, so the object
+    // has to be in a to page.
+    DCHECK(Heap::InToPage(object));
+    HeapObject heap_object;
+    bool success = object.GetHeapObject(&heap_object);
+    USE(success);
+    DCHECK(success);
+    task->MarkObject(heap_object);
+    return KEEP_SLOT;
+  }
+  return REMOVE_SLOT;
+}
+
+void YoungGenerationMarkingJob::Run(JobDelegate* delegate) {
+  if (delegate->IsJoiningThread()) {
+    TRACE_GC(collector_->heap()->tracer(),
+             GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
+    ProcessItems(delegate);
+  } else {
+    TRACE_GC_EPOCH(collector_->heap()->tracer(),
+                   GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
+                   ThreadKind::kBackground);
+    ProcessItems(delegate);
+  }
+}
+
+size_t YoungGenerationMarkingJob::GetMaxConcurrency(size_t worker_count) const {
+  // Pages are not private to markers but we can still use them to estimate
+  // the amount of marking that is required.
+  const int kPagesPerTask = 2;
+  size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
+  size_t num_tasks;
+  if (!incremental()) {
+    num_tasks = std::max(
+        (items + 1) / kPagesPerTask,
+        global_worklists_->shared()->Size() +
+            global_worklists_->on_hold()
+                ->Size());  // TODO(v8:13012): If this is used with concurrent
+                            // marking, we need to remove on_hold() here.
+  } else {
+    num_tasks = (items + 1) / kPagesPerTask;
+  }
+  if (!v8_flags.parallel_marking) {
+    num_tasks = std::min<size_t>(1, num_tasks);
+  }
+  return std::min<size_t>(num_tasks,
+                          MinorMarkCompactCollector::kMaxParallelTasks);
+}
+
+void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) {
+  double marking_time = 0.0;
+  {
+    TimedScope scope(&marking_time);
+    YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
+    ProcessMarkingItems(&task);
+    if (!incremental()) {
+      task.EmptyMarkingWorklist();
+    } else {
+      task.PublishMarkingWorklist();
+    }
+  }
+  if (v8_flags.trace_minor_mc_parallel_marking) {
+    PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
+                 static_cast<void*>(this), marking_time);
+  }
+}
+
+void YoungGenerationMarkingJob::ProcessMarkingItems(
+    YoungGenerationMarkingTask* task) {
+  // TODO(v8:13012): YoungGenerationMarkingJob is generally used to compute the
+  // transitive closure. In the context of concurrent MinorMC, it currently
+  // only seeds the worklists from the old-to-new remembered set, but does not
+  // empty them (this is done concurrently). The class should be refactored to
+  // make this clearer.
+  while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
+    base::Optional<size_t> index = generator_.GetNext();
+    if (!index) return;
+    for (size_t i = *index; i < marking_items_.size(); ++i) {
+      auto& work_item = marking_items_[i];
+      if (!work_item.TryAcquire()) break;
+      work_item.Process(task);
+      if (!incremental()) {
+        task->EmptyMarkingWorklist();
+      }
+      if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
+          1) {
+        return;
+      }
+    }
+  }
+}
 
 void MinorMarkCompactCollector::MarkRootSetInParallel(
-    RootMarkingVisitor* root_visitor) {
+    RootMarkingVisitor* root_visitor, bool was_marked_incrementally) {
   {
     std::vector<PageMarkingItem> marking_items;
@@ -6445,11 +6430,14 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
                              SkipRoot::kOldGeneration});
     isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
         root_visitor);
-    // Create items for each page.
-    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
-        heap(), [&marking_items](MemoryChunk* chunk) {
-          marking_items.emplace_back(chunk);
-        });
+
+    if (!was_marked_incrementally) {
+      // Create items for each page.
+      RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
+          heap(), [&marking_items](MemoryChunk* chunk) {
+            marking_items.emplace_back(chunk);
+          });
+    }
   }
 
   // Add tasks and run in parallel.
@@ -6460,10 +6448,11 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
     local_marking_worklists_->Publish();
     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
     V8::GetCurrentPlatform()
-        ->CreateJob(v8::TaskPriority::kUserBlocking,
-                    std::make_unique<YoungGenerationMarkingJob>(
-                        isolate(), this, marking_worklists(),
-                        std::move(marking_items)))
+        ->CreateJob(
+            v8::TaskPriority::kUserBlocking,
+            std::make_unique<YoungGenerationMarkingJob>(
+                isolate(), this, marking_worklists(),
+                std::move(marking_items), YoungMarkingJobType::kAtomic))
         ->Join();
 
     DCHECK(local_marking_worklists_->IsEmpty());
@@ -6494,7 +6483,7 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
     RootMarkingVisitor root_visitor(this);
-    MarkRootSetInParallel(&root_visitor);
+    MarkRootSetInParallel(&root_visitor, was_marked_incrementally);
 
     // Mark rest on the main thread.
     {
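
To make the GetMaxConcurrency() heuristic concrete, here it is extracted into a free function; the kMaxParallelTasks value of 8 is an assumption for the example, not taken from this diff. With 5 remembered-set pages and empty worklists on the incremental path it requests (5 + 1) / 2 = 3 workers.

#include <algorithm>
#include <cstddef>
#include <cstdio>

// Mirrors the branching in YoungGenerationMarkingJob::GetMaxConcurrency();
// kMaxParallelTasks = 8 is assumed here for illustration.
size_t MaxConcurrency(size_t items, size_t worklist_size, bool incremental,
                      bool parallel_marking) {
  const size_t kPagesPerTask = 2;
  const size_t kMaxParallelTasks = 8;  // assumption
  size_t num_tasks =
      incremental ? (items + 1) / kPagesPerTask
                  : std::max((items + 1) / kPagesPerTask, worklist_size);
  if (!parallel_marking) num_tasks = std::min<size_t>(1, num_tasks);
  return std::min(num_tasks, kMaxParallelTasks);
}

int main() {
  std::printf("%zu\n", MaxConcurrency(5, 0, true, true));    // 3: pages only
  std::printf("%zu\n", MaxConcurrency(5, 10, false, true));  // 8: capped
  std::printf("%zu\n", MaxConcurrency(5, 0, true, false));   // 1: serial
}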

src/heap/mark-compact.h

@@ -829,7 +829,8 @@ class MinorMarkCompactCollector final : public CollectorBase {
   static const int kMainMarker = 0;
 
   void MarkLiveObjects();
-  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
+  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor,
+                             bool was_marked_incrementally);
   V8_INLINE void MarkRootObject(HeapObject obj);
   void DrainMarkingWorklist();
   void TraceFragmentation();
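
Putting the four files together, the intended flow after this CL, paraphrased from the diff rather than quoted from V8:

// Sketch of the resulting call flow (paraphrased):
//
// Incremental marking start:
//   IncrementalMarking::MarkRoots()
//     -> YoungGenerationMarkingJob(..., kIncremental), Join()
//        - ProcessMarkingItems() seeds worklists from OLD_TO_NEW pages
//        - PublishMarkingWorklist(); worklists are NOT drained here
//
// Later, in the atomic minor GC pause:
//   MinorMarkCompactCollector::MarkLiveObjects()
//     -> MarkRootSetInParallel(&root_visitor, was_marked_incrementally)
//        - skips the OLD_TO_NEW page scan if it already ran incrementally
//        -> YoungGenerationMarkingJob(..., kAtomic), Join()
//           - EmptyMarkingWorklist() drains to the transitive closure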