[heap] Use MarkingWorklists in MinorMC

This CL is part of an effort to enable concurrent marking in MinorMC.

For this purpose, we plan to reuse the IncrementalMarking class, which
already implements part of the concurrent marking code for MajorMC.
IncrementalMarking internally uses the MarkingWorklists class.

This CL adapts the stop-the-world marking implementation of
MinorMC to use the MarkingWorklists class.
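
As a rough illustration of the pattern involved (a minimal sketch with
simplified stand-in types; GlobalWorklist, LocalView and Object are
assumptions for illustration, not the actual MarkingWorklists /
MarkingWorklists::Local API): each marking task holds a local view of the
shared worklists, pushes newly marked objects into it, drains it, and
publishes any leftover local entries so other tasks can pick them up.

#include <mutex>
#include <vector>

struct Object { int id; };

// Shared pool of work segments, guarded by a mutex (a stand-in for the
// segment-based queues used by the real worklists).
class GlobalWorklist {
 public:
  void PushSegment(std::vector<Object> segment) {
    std::lock_guard<std::mutex> lock(mutex_);
    segments_.push_back(std::move(segment));
  }
  bool PopSegment(std::vector<Object>* out) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (segments_.empty()) return false;
    *out = std::move(segments_.back());
    segments_.pop_back();
    return true;
  }

 private:
  std::mutex mutex_;
  std::vector<std::vector<Object>> segments_;
};

// Per-task view: batches pushes locally and only touches the shared pool
// when a batch fills up, when the local buffer runs dry, or on Publish().
class LocalView {
 public:
  explicit LocalView(GlobalWorklist* global) : global_(global) {}

  void Push(Object obj) {
    local_.push_back(obj);
    if (local_.size() >= 64) Publish();
  }

  bool Pop(Object* obj) {
    if (local_.empty() && !global_->PopSegment(&local_)) return false;
    *obj = local_.back();
    local_.pop_back();
    return true;
  }

  // Make locally buffered entries visible to other tasks.
  void Publish() {
    if (local_.empty()) return;
    global_->PushSegment(std::move(local_));
    local_.clear();
  }

 private:
  GlobalWorklist* global_;
  std::vector<Object> local_;
};

int main() {
  GlobalWorklist worklists;
  LocalView main_thread(&worklists);
  main_thread.Push(Object{1});
  main_thread.Publish();  // Done before handing work to marking tasks.

  LocalView task(&worklists);
  Object obj;
  while (task.Pop(&obj)) { /* visitor.Visit(obj) in the real collector */ }
  return 0;
}

In the CL itself, the corresponding roles are played by worklists_,
main_thread_worklists_local_, and the per-task MarkingWorklists::Local
instances created in YoungGenerationMarkingTask.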

Bug: v8:13012
Change-Id: I3c4eb33142f2630e89aa3771b6065b9f82dc0847
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3747862
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Cr-Commit-Position: refs/heads/main@{#81646}
Author: Leon Bettscheider
Date: 2022-07-11 15:36:36 +00:00 (committed by V8 LUCI CQ)
Parent: 8ab9821b9d
Commit: 110fa66e13
5 changed files with 94 additions and 99 deletions


@@ -43,7 +43,7 @@ void MarkCompactCollector::MarkRootObject(Root root, HeapObject obj) {
void MinorMarkCompactCollector::MarkRootObject(HeapObject obj) {
if (Heap::InYoungGeneration(obj) &&
non_atomic_marking_state_.WhiteToBlack(obj)) {
main_thread_worklist_local_.Push(obj);
main_thread_worklists_local_->Push(obj);
}
}


@@ -4971,15 +4971,14 @@ void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
chunk->ReleaseInvalidatedSlots<OLD_TO_SHARED>();
RememberedSet<OLD_TO_SHARED>::IterateTyped(
chunk, [this](SlotType slot_type, Address slot) {
RememberedSet<OLD_TO_SHARED>::IterateTyped(chunk, [this](SlotType slot_type,
Address slot) {
// Using UpdateStrongSlot is OK here, because there are no weak
// typed slots.
PtrComprCageBase cage_base = heap_->isolate();
return UpdateTypedSlotHelper::UpdateTypedSlot(
heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base,
slot);
return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
});
});
}
@@ -5350,9 +5349,9 @@ class YoungGenerationMarkingVisitor final
: public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
public:
YoungGenerationMarkingVisitor(Isolate* isolate, MarkingState* marking_state,
MarkingWorklist::Local* worklist_local)
MarkingWorklists::Local* worklists_local)
: NewSpaceVisitor(isolate),
worklist_local_(worklist_local),
worklists_local_(worklists_local),
marking_state_(marking_state) {}
V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
@@ -5420,14 +5419,16 @@ class YoungGenerationMarkingVisitor final
inline void MarkObjectViaMarkingWorklist(HeapObject object) {
if (marking_state_->WhiteToBlack(object)) {
worklist_local_->Push(object);
worklists_local_->Push(object);
}
}
MarkingWorklist::Local* worklist_local_;
MarkingWorklists::Local* worklists_local_;
MarkingState* marking_state_;
};
MinorMarkCompactCollector::~MinorMarkCompactCollector() = default;
void MinorMarkCompactCollector::SetUp() {}
void MinorMarkCompactCollector::TearDown() {}
@@ -5437,19 +5438,10 @@ constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
: heap_(heap),
worklist_(new MarkingWorklist()),
main_thread_worklist_local_(worklist_),
marking_state_(heap->isolate()),
non_atomic_marking_state_(heap->isolate()),
main_marking_visitor_(new YoungGenerationMarkingVisitor(
heap->isolate(), marking_state(), &main_thread_worklist_local_)),
page_parallel_job_semaphore_(0) {}
MinorMarkCompactCollector::~MinorMarkCompactCollector() {
delete worklist_;
delete main_marking_visitor_;
}
void MinorMarkCompactCollector::CleanupPromotedPages() {
for (Page* p : promoted_pages_) {
p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
@@ -5483,8 +5475,8 @@ class YoungGenerationMigrationObserver final : public MigrationObserver {
inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
int size) final {
// Migrate color to old generation marking in case the object survived young
// generation garbage collection.
// Migrate color to old generation marking in case the object survived
// young generation garbage collection.
if (heap_->incremental_marking()->IsMarking()) {
DCHECK(
heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
@@ -5515,8 +5507,8 @@ class YoungGenerationRecordMigratedSlotVisitor final
}
private:
// Only record slots for host objects that are considered as live by the full
// collector.
// Only record slots for host objects that are considered as live by the
// full collector.
inline bool IsLive(HeapObject object) {
return collector_->non_atomic_marking_state()->IsBlack(object);
}
@@ -5678,8 +5670,8 @@ void MinorMarkCompactCollector::CollectGarbage() {
heap()->concurrent_marking()->ClearMemoryChunkData(p);
}
}
// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
// Since we promote all surviving large objects immediately, all
// remaining large objects must be dead.
// TODO(v8:11685): Don't free all as soon as we have an intermediate
// generation.
heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
@@ -5780,8 +5772,8 @@ void MinorMarkCompactCollector::ClearNonLiveReferences() {
{
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
// Internalized strings are always stored in old space, so there is no need
// to clean them here.
// Internalized strings are always stored in old space, so there is no
// need to clean them here.
YoungGenerationExternalStringTableCleaner external_visitor(this);
heap()->external_string_table_.IterateYoung(&external_visitor);
heap()->external_string_table_.CleanUpYoung();
@@ -5846,10 +5838,11 @@ class YoungGenerationMarkingTask {
public:
YoungGenerationMarkingTask(Isolate* isolate,
MinorMarkCompactCollector* collector,
MarkingWorklist* global_worklist)
: marking_worklist_local_(global_worklist),
MarkingWorklists* global_worklists)
: marking_worklists_local_(
std::make_unique<MarkingWorklists::Local>(global_worklists)),
marking_state_(collector->marking_state()),
visitor_(isolate, marking_state_, &marking_worklist_local_) {}
visitor_(isolate, marking_state_, marking_worklists_local()) {}
void MarkObject(Object object) {
if (!Heap::InYoungGeneration(object)) return;
@@ -5861,13 +5854,17 @@ class YoungGenerationMarkingTask {
void EmptyMarkingWorklist() {
HeapObject object;
while (marking_worklist_local_.Pop(&object)) {
while (marking_worklists_local_->Pop(&object)) {
visitor_.Visit(object);
}
}
MarkingWorklists::Local* marking_worklists_local() {
return marking_worklists_local_.get();
}
private:
MarkingWorklist::Local marking_worklist_local_;
std::unique_ptr<MarkingWorklists::Local> marking_worklists_local_;
MarkingState* marking_state_;
YoungGenerationMarkingVisitor visitor_;
};
@@ -5939,11 +5936,11 @@ class YoungGenerationMarkingJob : public v8::JobTask {
public:
YoungGenerationMarkingJob(Isolate* isolate,
MinorMarkCompactCollector* collector,
MarkingWorklist* global_worklist,
MarkingWorklists* global_worklists,
std::vector<PageMarkingItem> marking_items)
: isolate_(isolate),
collector_(collector),
global_worklist_(global_worklist),
global_worklists_(global_worklists),
marking_items_(std::move(marking_items)),
remaining_marking_items_(marking_items_.size()),
generator_(marking_items_.size()) {}
@@ -5966,8 +5963,12 @@ class YoungGenerationMarkingJob : public v8::JobTask {
// the amount of marking that is required.
const int kPagesPerTask = 2;
size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
size_t num_tasks =
std::max((items + 1) / kPagesPerTask, global_worklist_->Size());
size_t num_tasks = std::max(
(items + 1) / kPagesPerTask,
global_worklists_->shared()->Size() +
global_worklists_->on_hold()
->Size()); // TODO(v8:13012): If this is used with concurrent
// marking, we need to remove on_hold() here.
if (!FLAG_parallel_marking) {
num_tasks = std::min<size_t>(1, num_tasks);
}
@@ -5980,7 +5981,7 @@ class YoungGenerationMarkingJob : public v8::JobTask {
double marking_time = 0.0;
{
TimedScope scope(&marking_time);
YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_);
YoungGenerationMarkingTask task(isolate_, collector_, global_worklists_);
ProcessMarkingItems(&task);
task.EmptyMarkingWorklist();
}
@@ -6009,7 +6010,7 @@ class YoungGenerationMarkingJob : public v8::JobTask {
Isolate* isolate_;
MinorMarkCompactCollector* collector_;
MarkingWorklist* global_worklist_;
MarkingWorklists* global_worklists_;
std::vector<PageMarkingItem> marking_items_;
std::atomic_size_t remaining_marking_items_{0};
IndexGenerator generator_;
@@ -6043,19 +6044,18 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
// Add tasks and run in parallel.
{
// The main thread might hold local items, while GlobalPoolSize() == 0.
// Flush to ensure these items are visible globally and picked up by the
// job.
main_thread_worklist_local_.Publish();
// The main thread might hold local items, while GlobalPoolSize() ==
// 0. Flush to ensure these items are visible globally and picked up
// by the job.
main_thread_worklists_local_->Publish();
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
V8::GetCurrentPlatform()
->PostJob(v8::TaskPriority::kUserBlocking,
std::make_unique<YoungGenerationMarkingJob>(
isolate(), this, worklist(), std::move(marking_items)))
isolate(), this, worklists(), std::move(marking_items)))
->Join();
DCHECK(worklist()->IsEmpty());
DCHECK(main_thread_worklist_local_.IsLocalEmpty());
DCHECK(main_thread_worklists_local_->IsEmpty());
}
}
}
@@ -6063,6 +6063,11 @@ void MinorMarkCompactCollector::MarkRootSetInParallel(
void MinorMarkCompactCollector::MarkLiveObjects() {
TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
main_thread_worklists_local_ =
std::make_unique<MarkingWorklists::Local>(&worklists_);
main_marking_visitor_ = std::make_unique<YoungGenerationMarkingVisitor>(
heap()->isolate(), marking_state(), main_thread_worklists_local());
PostponeInterruptsScope postpone(isolate());
RootMarkingVisitor root_visitor(this);
@@ -6085,19 +6090,22 @@ void MinorMarkCompactCollector::MarkLiveObjects() {
if (FLAG_minor_mc_trace_fragmentation) {
TraceFragmentation();
}
main_thread_worklists_local_.reset();
main_marking_visitor_.reset();
}
void MinorMarkCompactCollector::DrainMarkingWorklist() {
PtrComprCageBase cage_base(isolate());
HeapObject object;
while (main_thread_worklist_local_.Pop(&object)) {
while (main_thread_worklists_local_->Pop(&object)) {
DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
DCHECK(object.IsHeapObject());
DCHECK(heap()->Contains(object));
DCHECK(non_atomic_marking_state()->IsBlack(object));
main_marking_visitor()->Visit(object);
main_marking_visitor_->Visit(object);
}
DCHECK(main_thread_worklist_local_.IsLocalEmpty());
DCHECK(main_thread_worklists_local_->IsEmpty());
}
void MinorMarkCompactCollector::TraceFragmentation() {
@@ -6144,12 +6152,14 @@ void MinorMarkCompactCollector::TraceFragmentation() {
allocatable_bytes += area_end - p->area_start();
CHECK_EQ(allocatable_bytes, live_bytes + free_bytes_of_class[0]);
}
PrintIsolate(
isolate(),
"Minor Mark-Compact Fragmentation: allocatable_bytes=%zu live_bytes=%zu "
"free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu free_bytes_4K=%zu\n",
PrintIsolate(isolate(),
"Minor Mark-Compact Fragmentation: allocatable_bytes=%zu "
"live_bytes=%zu "
"free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu "
"free_bytes_4K=%zu\n",
allocatable_bytes, live_bytes, free_bytes_of_class[0],
free_bytes_of_class[1], free_bytes_of_class[2], free_bytes_of_class[3]);
free_bytes_of_class[1], free_bytes_of_class[2],
free_bytes_of_class[3]);
}
void MinorMarkCompactCollector::Evacuate() {
@@ -6243,9 +6253,10 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kZapFreeSpace);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
// When incremental marking is on, we need to clear the mark bits
// of the full collector. We cannot yet discard the young
// generation mark bits as they are still relevant for pointers
// updating.
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kIgnoreFreeSpace);
}
@@ -6263,8 +6274,8 @@ void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
FreeSpaceTreatmentMode::kZapFreeSpace);
} else if (heap()->incremental_marking()->IsMarking()) {
// When incremental marking is on, we need to clear the mark bits of
// the full collector. We cannot yet discard the young generation mark
// bits as they are still relevant for pointers updating.
// the full collector. We cannot yet discard the young generation
// mark bits as they are still relevant for pointers updating.
collector_->MakeIterable(static_cast<Page*>(chunk),
FreeSpaceTreatmentMode::kIgnoreFreeSpace);
}


@@ -777,17 +777,17 @@ class MinorMarkCompactCollector final {
std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode);
MarkingWorklists::Local* main_thread_worklists_local() {
return main_thread_worklists_local_.get();
}
private:
class RootMarkingVisitor;
static const int kNumMarkers = 8;
static const int kMainMarker = 0;
inline MarkingWorklist* worklist() { return worklist_; }
inline YoungGenerationMarkingVisitor* main_marking_visitor() {
return main_marking_visitor_;
}
inline MarkingWorklists* worklists() { return &worklists_; }
void MarkLiveObjects();
void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
@@ -813,13 +813,13 @@ class MinorMarkCompactCollector final {
Heap* heap_;
MarkingWorklist* worklist_;
MarkingWorklist::Local main_thread_worklist_local_;
MarkingWorklists worklists_;
std::unique_ptr<MarkingWorklists::Local> main_thread_worklists_local_;
std::unique_ptr<YoungGenerationMarkingVisitor> main_marking_visitor_;
MarkingState marking_state_;
NonAtomicMarkingState non_atomic_marking_state_;
YoungGenerationMarkingVisitor* main_marking_visitor_;
base::Semaphore page_parallel_job_semaphore_;
std::vector<Page*> new_space_evacuation_pages_;
std::vector<Page*> promoted_pages_;


@@ -21,14 +21,6 @@
namespace v8 {
namespace internal {
MarkingWorklists::~MarkingWorklists() {
DCHECK(shared_.IsEmpty());
DCHECK(on_hold_.IsEmpty());
DCHECK(other_.IsEmpty());
DCHECK(worklists_.empty());
DCHECK(context_worklists_.empty());
}
void MarkingWorklists::Clear() {
shared_.Clear();
on_hold_.Clear();
@@ -106,7 +98,6 @@ MarkingWorklists::Local::Local(
std::unique_ptr<CppMarkingState> cpp_marking_state)
: on_hold_(global->on_hold()),
wrapper_(global->wrapper()),
is_per_context_mode_(false),
cpp_marking_state_(std::move(cpp_marking_state)) {
if (global->context_worklists().empty()) {
MarkingWorklist::Local shared(global->shared());
@@ -126,17 +117,6 @@
}
}
MarkingWorklists::Local::~Local() {
DCHECK(active_.IsLocalEmpty());
if (is_per_context_mode_) {
for (auto& cw : worklist_by_context_) {
if (cw.first != active_context_) {
DCHECK(cw.second->IsLocalEmpty());
}
}
}
}
void MarkingWorklists::Local::Publish() {
active_.Publish();
on_hold_.Publish();


@@ -66,7 +66,7 @@ struct ContextWorklistPair {
};
// A helper class that owns all global marking worklists.
class V8_EXPORT_PRIVATE MarkingWorklists {
class V8_EXPORT_PRIVATE MarkingWorklists final {
public:
class Local;
// Fake addresses of special contexts used for per-context accounting.
@@ -77,7 +77,9 @@ class V8_EXPORT_PRIVATE MarkingWorklists {
static const Address kOtherContext = 8;
MarkingWorklists() = default;
~MarkingWorklists();
// Worklists implicitly check for emptiness on destruction.
~MarkingWorklists() = default;
// Calls the specified callback on each element of the deques and replaces
// the element with the result of the callback. If the callback returns
@@ -141,16 +143,18 @@
// - active_owner == worlist_by_context[active_context_].get()
// - *active_owner is empty (all fields are null) because its content has
// been moved to active_.
class V8_EXPORT_PRIVATE MarkingWorklists::Local {
class V8_EXPORT_PRIVATE MarkingWorklists::Local final {
public:
static constexpr Address kSharedContext = MarkingWorklists::kSharedContext;
static constexpr Address kOtherContext = MarkingWorklists::kOtherContext;
static constexpr std::nullptr_t kNoCppMarkingState = nullptr;
Local(
explicit Local(
MarkingWorklists* global,
std::unique_ptr<CppMarkingState> cpp_marking_state = kNoCppMarkingState);
~Local();
// Local worklists implicitly check for emptiness on destruction.
~Local() = default;
inline void Push(HeapObject object);
inline bool Pop(HeapObject* object);
@@ -200,7 +204,7 @@ class V8_EXPORT_PRIVATE MarkingWorklists::Local {
MarkingWorklist::Local active_;
Address active_context_;
MarkingWorklist::Local* active_owner_;
bool is_per_context_mode_;
bool is_per_context_mode_ = false;
std::unordered_map<Address, std::unique_ptr<MarkingWorklist::Local>>
worklist_by_context_;