[heap] Process PageMarkingItems on incremental marking start

This CL adds processing of the OLD_TO_NEW RememberedSet
during minor incremental marking start.

Bug: v8:13012
Change-Id: I4fd051087d46e1b8a22b735bf0cae6d2da2ecb5b
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3885875
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Cr-Commit-Position: refs/heads/main@{#83278}
This commit is contained in:
Leon Bettscheider 2022-09-16 14:42:47 +00:00 committed by V8 LUCI CQ
parent 17b903a22e
commit f81d0ca6e5
4 changed files with 204 additions and 140 deletions

View File

@ -266,7 +266,21 @@ void IncrementalMarking::MarkRoots() {
heap()->isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
&visitor);
// TODO(v8:13012): Do PageMarkingItem processing.
std::vector<PageMarkingItem> marking_items;
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap_, [&marking_items](MemoryChunk* chunk) {
marking_items.emplace_back(chunk);
});
V8::GetCurrentPlatform()
->CreateJob(
v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
heap_->isolate(), minor_collector_,
minor_collector_->marking_worklists(), std::move(marking_items),
YoungMarkingJobType::kIncremental))
->Join();
}
}

View File

@ -9,6 +9,7 @@
#include "src/codegen/assembler-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/index-generator.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-worklist-inl.h"
#include "src/heap/marking-worklist.h"
@ -283,6 +284,65 @@ typename LiveObjectRange<mode>::iterator LiveObjectRange<mode>::end() {
Isolate* CollectorBase::isolate() { return heap()->isolate(); }
class YoungGenerationMarkingTask;
// Parallel work item that marks young-generation objects reachable from a
// single MemoryChunk's OLD_TO_NEW remembered set. Items are processed by
// YoungGenerationMarkingJob (see Process() in the .cc file).
class PageMarkingItem : public ParallelWorkItem {
public:
// Non-owning pointer; the chunk is assumed to outlive the item —
// TODO(review): confirm against the owning job's lifetime.
explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
~PageMarkingItem() = default;
// Visits the chunk's remembered-set slots and marks referenced
// young-generation objects via |task|.
void Process(YoungGenerationMarkingTask* task);
private:
inline Heap* heap() { return chunk_->heap(); }
// Marks objects referenced from the chunk's untyped OLD_TO_NEW slots.
void MarkUntypedPointers(YoungGenerationMarkingTask* task);
// Marks objects referenced from the chunk's typed OLD_TO_NEW slots.
void MarkTypedPointers(YoungGenerationMarkingTask* task);
// Marks the object in |slot| if it is in the young generation and returns
// KEEP_SLOT; otherwise returns REMOVE_SLOT.
template <typename TSlot>
V8_INLINE SlotCallbackResult
CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot);
MemoryChunk* chunk_;
};
// Mode for YoungGenerationMarkingJob: kAtomic also drains the marking
// worklists after seeding them from the remembered set; kIncremental only
// seeds the worklists (they are emptied later).
enum class YoungMarkingJobType { kAtomic, kIncremental };
// JobTask that processes PageMarkingItems in parallel across worker threads.
// Depending on |young_marking_job_type| it either computes work atomically
// (drains worklists) or only seeds the worklists for incremental marking.
class YoungGenerationMarkingJob : public v8::JobTask {
public:
// Note on the initializer list: |remaining_marking_items_| and |generator_|
// read marking_items_.size() — this is the member (already moved into), and
// it is declared before both, so member init order makes this well-defined.
YoungGenerationMarkingJob(Isolate* isolate,
MinorMarkCompactCollector* collector,
MarkingWorklists* global_worklists,
std::vector<PageMarkingItem> marking_items,
YoungMarkingJobType young_marking_job_type)
: isolate_(isolate),
collector_(collector),
global_worklists_(global_worklists),
marking_items_(std::move(marking_items)),
remaining_marking_items_(marking_items_.size()),
generator_(marking_items_.size()),
young_marking_job_type_(young_marking_job_type) {}
void Run(JobDelegate* delegate) override;
size_t GetMaxConcurrency(size_t worker_count) const override;
// True when the job only seeds worklists (incremental mode).
bool incremental() const {
return young_marking_job_type_ == YoungMarkingJobType::kIncremental;
}
private:
void ProcessItems(JobDelegate* delegate);
void ProcessMarkingItems(YoungGenerationMarkingTask* task);
Isolate* isolate_;
MinorMarkCompactCollector* collector_;
MarkingWorklists* global_worklists_;
// Shared pool of per-chunk work items; tasks acquire entries concurrently.
std::vector<PageMarkingItem> marking_items_;
// Count of not-yet-processed items; decremented as items complete.
std::atomic_size_t remaining_marking_items_{0};
// Hands out starting indices into marking_items_ for each worker.
IndexGenerator generator_;
YoungMarkingJobType young_marking_job_type_;
};
} // namespace internal
} // namespace v8

View File

@ -6239,7 +6239,6 @@ MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;
class YoungGenerationMarkingTask {
public:
@ -6269,6 +6268,8 @@ class YoungGenerationMarkingTask {
}
}
void PublishMarkingWorklist() { marking_worklists_local_->Publish(); }
MarkingWorklists::Local* marking_worklists_local() {
return marking_worklists_local_.get();
}
@ -6279,155 +6280,139 @@ class YoungGenerationMarkingTask {
YoungGenerationMainMarkingVisitor visitor_;
};
class PageMarkingItem : public ParallelWorkItem {
public:
explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
~PageMarkingItem() = default;
// Processes one chunk: marks young-generation objects referenced from both
// the untyped and the typed OLD_TO_NEW slot sets. The chunk mutex is held
// for the whole operation — presumably to serialize slot-set access with
// other users of the chunk; TODO(review): confirm.
void PageMarkingItem::Process(YoungGenerationMarkingTask* task) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "PageMarkingItem::Process");
base::MutexGuard guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
}
void Process(YoungGenerationMarkingTask* task) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
"PageMarkingItem::Process");
base::MutexGuard guard(chunk_->mutex());
MarkUntypedPointers(task);
MarkTypedPointers(task);
// Iterates the chunk's untyped OLD_TO_NEW slots. Slots rejected by the
// InvalidatedSlotsFilter are dropped (REMOVE_SLOT); valid slots are handed
// to CheckAndMarkObject, which keeps or removes them depending on whether
// they still point into the young generation. Empty slot-set buckets are
// released during iteration (FREE_EMPTY_BUCKETS).
void PageMarkingItem::MarkUntypedPointers(YoungGenerationMarkingTask* task) {
// LivenessCheck::kNo: the filter does not consult mark bits here —
// NOTE(review): assumed safe at this phase; confirm.
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, task, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndMarkObject(task, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
}
// Iterates the chunk's typed OLD_TO_NEW slots, decoding each slot via
// UpdateTypedSlotHelper::UpdateTypedSlot and marking the referenced object
// with CheckAndMarkObject (which decides KEEP_SLOT vs. REMOVE_SLOT).
void PageMarkingItem::MarkTypedPointers(YoungGenerationMarkingTask* task) {
  // Capture |this| and |task| explicitly instead of `[=]`: default capture
  // implicitly captures `this` (deprecated in C++20) and hides what the
  // lambda uses; this also matches the inner lambda and
  // MarkUntypedPointers.
  RememberedSet<OLD_TO_NEW>::IterateTyped(
      chunk_, [this, task](SlotType slot_type, Address slot) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
              return CheckAndMarkObject(task, slot);
            });
      });
}
// Inspects the object referenced by |slot|. Young-generation targets are
// marked through |task| and the slot is kept for the next remembered-set
// round (KEEP_SLOT); anything else causes the slot to be dropped
// (REMOVE_SLOT).
template <typename TSlot>
V8_INLINE SlotCallbackResult PageMarkingItem::CheckAndMarkObject(
    YoungGenerationMarkingTask* task, TSlot slot) {
  static_assert(
      std::is_same<TSlot, FullMaybeObjectSlot>::value ||
          std::is_same<TSlot, MaybeObjectSlot>::value,
      "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
  MaybeObject target = *slot;
  if (!Heap::InYoungGeneration(target)) return REMOVE_SLOT;
  // Marking happens before flipping the young generation, so the object
  // has to be in a to page.
  DCHECK(Heap::InToPage(target));
  HeapObject heap_object;
  const bool extracted = target.GetHeapObject(&heap_object);
  USE(extracted);
  DCHECK(extracted);
  task->MarkObject(heap_object);
  return KEEP_SLOT;
}
private:
inline Heap* heap() { return chunk_->heap(); }
void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(
chunk_, InvalidatedSlotsFilter::LivenessCheck::kNo);
RememberedSet<OLD_TO_NEW>::Iterate(
chunk_,
[this, task, &filter](MaybeObjectSlot slot) {
if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
return CheckAndMarkObject(task, slot);
},
SlotSet::FREE_EMPTY_BUCKETS);
// Entry point invoked by the platform job system on the joining (main)
// thread as well as on background workers. Both paths do identical work;
// they differ only in which tracer scope the time is attributed to.
void YoungGenerationMarkingJob::Run(JobDelegate* delegate) {
  if (!delegate->IsJoiningThread()) {
    TRACE_GC_EPOCH(collector_->heap()->tracer(),
                   GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
                   ThreadKind::kBackground);
    ProcessItems(delegate);
  } else {
    TRACE_GC(collector_->heap()->tracer(),
             GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
    ProcessItems(delegate);
  }
}
void MarkTypedPointers(YoungGenerationMarkingTask* task) {
RememberedSet<OLD_TO_NEW>::IterateTyped(
chunk_, [=](SlotType slot_type, Address slot) {
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
return CheckAndMarkObject(task, slot);
});
});
}
template <typename TSlot>
V8_INLINE SlotCallbackResult
CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
static_assert(
std::is_same<TSlot, FullMaybeObjectSlot>::value ||
std::is_same<TSlot, MaybeObjectSlot>::value,
"Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
MaybeObject object = *slot;
if (Heap::InYoungGeneration(object)) {
// Marking happens before flipping the young generation, so the object
// has to be in a to page.
DCHECK(Heap::InToPage(object));
HeapObject heap_object;
bool success = object.GetHeapObject(&heap_object);
USE(success);
DCHECK(success);
task->MarkObject(heap_object);
return KEEP_SLOT;
}
return REMOVE_SLOT;
}
MemoryChunk* chunk_;
};
class YoungGenerationMarkingJob : public v8::JobTask {
public:
YoungGenerationMarkingJob(Isolate* isolate,
MinorMarkCompactCollector* collector,
MarkingWorklists* global_worklists,
std::vector<PageMarkingItem> marking_items)
: isolate_(isolate),
collector_(collector),
global_worklists_(global_worklists),
marking_items_(std::move(marking_items)),
remaining_marking_items_(marking_items_.size()),
generator_(marking_items_.size()) {}
void Run(JobDelegate* delegate) override {
if (delegate->IsJoiningThread()) {
TRACE_GC(collector_->heap()->tracer(),
GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
ProcessItems(delegate);
} else {
TRACE_GC_EPOCH(collector_->heap()->tracer(),
GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
ThreadKind::kBackground);
ProcessItems(delegate);
}
}
size_t GetMaxConcurrency(size_t worker_count) const override {
// Pages are not private to markers but we can still use them to estimate
// the amount of marking that is required.
const int kPagesPerTask = 2;
size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
size_t num_tasks = std::max(
size_t YoungGenerationMarkingJob::GetMaxConcurrency(size_t worker_count) const {
// Pages are not private to markers but we can still use them to estimate
// the amount of marking that is required.
const int kPagesPerTask = 2;
size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
size_t num_tasks;
if (!incremental()) {
num_tasks = std::max(
(items + 1) / kPagesPerTask,
global_worklists_->shared()->Size() +
global_worklists_->on_hold()
->Size()); // TODO(v8:13012): If this is used with concurrent
// marking, we need to remove on_hold() here.
if (!v8_flags.parallel_marking) {
num_tasks = std::min<size_t>(1, num_tasks);
}
return std::min<size_t>(num_tasks,
MinorMarkCompactCollector::kMaxParallelTasks);
} else {
num_tasks = (items + 1) / kPagesPerTask;
}
private:
void ProcessItems(JobDelegate* delegate) {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
ProcessMarkingItems(&task);
if (!v8_flags.parallel_marking) {
num_tasks = std::min<size_t>(1, num_tasks);
}
return std::min<size_t>(num_tasks,
MinorMarkCompactCollector::kMaxParallelTasks);
}
void YoungGenerationMarkingJob::ProcessItems(JobDelegate* delegate) {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
ProcessMarkingItems(&task);
if (!incremental()) {
task.EmptyMarkingWorklist();
}
if (v8_flags.trace_minor_mc_parallel_marking) {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
static_cast<void*>(this), marking_time);
} else {
task.PublishMarkingWorklist();
}
}
if (v8_flags.trace_minor_mc_parallel_marking) {
PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
static_cast<void*>(this), marking_time);
}
}
void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
base::Optional<size_t> index = generator_.GetNext();
if (!index) return;
for (size_t i = *index; i < marking_items_.size(); ++i) {
auto& work_item = marking_items_[i];
if (!work_item.TryAcquire()) break;
work_item.Process(task);
void YoungGenerationMarkingJob::ProcessMarkingItems(
YoungGenerationMarkingTask* task) {
// TODO(v8:13012): YoungGenerationMarkingJob is generally used to compute the
// transitive closure. In the context of concurrent MinorMC, it currently only
// seeds the worklists from the old-to-new remembered set, but does not empty
// them (this is done concurrently). The class should be refactored to make
// this clearer.
while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
base::Optional<size_t> index = generator_.GetNext();
if (!index) return;
for (size_t i = *index; i < marking_items_.size(); ++i) {
auto& work_item = marking_items_[i];
if (!work_item.TryAcquire()) break;
work_item.Process(task);
if (!incremental()) {
task->EmptyMarkingWorklist();
if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
1) {
return;
}
}
if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
1) {
return;
}
}
}
Isolate* isolate_;
MinorMarkCompactCollector* collector_;
MarkingWorklists* global_worklists_;
std::vector<PageMarkingItem> marking_items_;
std::atomic_size_t remaining_marking_items_{0};
IndexGenerator generator_;
};
}
void MinorMarkCompactCollector::MarkRootSetInParallel(
RootMarkingVisitor* root_visitor) {
RootMarkingVisitor* root_visitor, bool was_marked_incrementally) {
{
std::vector<PageMarkingItem> marking_items;
@ -6445,11 +6430,14 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
SkipRoot::kOldGeneration});
isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
root_visitor);
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [&marking_items](MemoryChunk* chunk) {
marking_items.emplace_back(chunk);
});
if (!was_marked_incrementally) {
// Create items for each page.
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
heap(), [&marking_items](MemoryChunk* chunk) {
marking_items.emplace_back(chunk);
});
}
}
// Add tasks and run in parallel.
@ -6460,10 +6448,11 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
local_marking_worklists_->Publish();
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
V8::GetCurrentPlatform()
->CreateJob(v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
isolate(), this, marking_worklists(),
std::move(marking_items)))
->CreateJob(
v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
isolate(), this, marking_worklists(),
std::move(marking_items), YoungMarkingJobType::kAtomic))
->Join();
DCHECK(local_marking_worklists_->IsEmpty());
@ -6494,7 +6483,7 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
RootMarkingVisitor root_visitor(this);
MarkRootSetInParallel(&root_visitor);
MarkRootSetInParallel(&root_visitor, was_marked_incrementally);
// Mark rest on the main thread.
{

View File

@ -829,7 +829,8 @@ class MinorMarkCompactCollector final : public CollectorBase {
static const int kMainMarker = 0;
void MarkLiveObjects();
void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
void MarkRootSetInParallel(RootMarkingVisitor* root_visitor,
bool was_marked_incrementally);
V8_INLINE void MarkRootObject(HeapObject obj);
void DrainMarkingWorklist();
void TraceFragmentation();