diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
index deb211e324..51fa8301d1 100644
--- a/src/heap/incremental-marking.cc
+++ b/src/heap/incremental-marking.cc
@@ -278,7 +278,8 @@ void IncrementalMarking::StartMarking() {
   is_marking_ = true;
   heap_->SetIsMarkingFlag(true);
 
-  MarkingBarrier::ActivateAll(heap(), is_compacting_);
+  MarkingBarrier::ActivateAll(heap(), is_compacting_,
+                              MarkingBarrierType::kMajor);
   GlobalHandles::EnableMarkingBarrier(heap()->isolate());
 
   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
index 8d62f09308..b3a47b4cbe 100644
--- a/src/heap/incremental-marking.h
+++ b/src/heap/incremental-marking.h
@@ -156,6 +156,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // Performs incremental marking step for unit tests.
   void AdvanceForTesting(double max_step_size_in_ms);
 
+  bool is_minor() const { return false; }
+
  private:
   class IncrementalMarkingRootMarkingVisitor;
 
diff --git a/src/heap/local-heap.cc b/src/heap/local-heap.cc
index da5ec34d43..4b58c285fb 100644
--- a/src/heap/local-heap.cc
+++ b/src/heap/local-heap.cc
@@ -63,8 +63,10 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
     if (!is_main_thread()) {
       WriteBarrier::SetForThread(marking_barrier_.get());
       if (heap_->incremental_marking()->IsMarking()) {
-        marking_barrier_->Activate(
-            heap_->incremental_marking()->IsCompacting());
+        marking_barrier_->Activate(heap_->incremental_marking()->IsCompacting(),
+                                   heap_->incremental_marking()->is_minor()
+                                       ? MarkingBarrierType::kMinor
+                                       : MarkingBarrierType::kMajor);
       }
     }
   });
diff --git a/src/heap/marking-barrier-inl.h b/src/heap/marking-barrier-inl.h
index 45acfc087a..a60e8890a6 100644
--- a/src/heap/marking-barrier-inl.h
+++ b/src/heap/marking-barrier-inl.h
@@ -30,12 +30,22 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
   }
   BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
   if (is_shared_heap_ != target_page->InSharedHeap()) return false;
-  if (WhiteToGreyAndPush(value)) {
-    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
-      heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+
+  if (is_minor()) {
+    // We do not need to insert into RememberedSet here because the
+    // C++ marking barrier already does this for us.
+    if (Heap::InYoungGeneration(value)) {
+      WhiteToGreyAndPush(value);  // NEW->NEW
     }
+    return false;
+  } else {
+    if (WhiteToGreyAndPush(value)) {
+      if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+        heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+      }
+    }
+    return true;
   }
-  return true;
 }
 
 template <typename TSlot>
@@ -47,7 +57,8 @@ inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
     // Mark both, weak and strong edges.
     if (object.GetHeapObject(isolate, &heap_object)) {
      if (MarkValue(host, heap_object) && is_compacting_) {
-        collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
+        DCHECK(is_major());
+        major_collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
      }
    }
  }
@@ -55,7 +66,7 @@ inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
 
 bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
   if (marking_state_.WhiteToGrey(obj)) {
-    worklist_.Push(obj);
+    current_worklist_->Push(obj);
     return true;
   }
   return false;
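The new MarkValue fast path above is the heart of the patch: the minor barrier greys only young-generation values and never asks its caller to record a slot (slot recording exists for compaction, which is a major-GC concern), while the major path keeps the old behavior and, after its early bail-outs, still returns true unconditionally. Below is a standalone sketch of that dispatch using invented stand-ins (Object, Barrier, a single mark bit), not V8's actual types:

```cpp
#include <cassert>
#include <deque>

enum class BarrierType { kMinor, kMajor };

struct Object {
  bool in_young_generation;
  bool marked = false;  // stands in for the white->grey transition
};

struct Barrier {
  BarrierType type;
  std::deque<Object*> worklist;  // stands in for MarkingWorklist::Local

  bool WhiteToGreyAndPush(Object* obj) {
    if (obj->marked) return false;
    obj->marked = true;
    worklist.push_back(obj);
    return true;
  }

  // The return value tells the caller whether slot recording (a compaction
  // concern of the major GC) may be needed, mirroring the patched MarkValue.
  bool MarkValue(Object* value) {
    if (type == BarrierType::kMinor) {
      // Per the patch comment, OLD->NEW edges are already covered by the
      // remembered set, so only NEW->NEW edges are traced here.
      if (value->in_young_generation) WhiteToGreyAndPush(value);
      return false;  // minor marking never records slots
    }
    WhiteToGreyAndPush(value);
    return true;  // major marking may record the slot when compacting
  }
};

int main() {
  Object young{true}, old_obj{false};

  Barrier minor{BarrierType::kMinor};
  assert(!minor.MarkValue(&young));    // greyed, but no slot recording
  assert(minor.worklist.size() == 1);
  assert(!minor.MarkValue(&old_obj));  // old object: ignored entirely
  assert(minor.worklist.size() == 1);

  Barrier major{BarrierType::kMajor};
  assert(major.MarkValue(&old_obj));   // always true on the major path
  assert(major.worklist.size() == 1);
}
```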
diff --git a/src/heap/marking-barrier.cc b/src/heap/marking-barrier.cc
index ddf8dc1bbf..75e00859d0 100644
--- a/src/heap/marking-barrier.cc
+++ b/src/heap/marking-barrier.cc
@@ -24,9 +24,11 @@ namespace internal {
 
 MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
     : heap_(local_heap->heap()),
-      collector_(heap_->mark_compact_collector()),
+      major_collector_(heap_->mark_compact_collector()),
+      minor_collector_(heap_->minor_mark_compact_collector()),
       incremental_marking_(heap_->incremental_marking()),
-      worklist_(*collector_->marking_worklists()->shared()),
+      major_worklist_(*major_collector_->marking_worklists()->shared()),
+      minor_worklist_(*minor_collector_->marking_worklists()->shared()),
       marking_state_(heap_->isolate()),
       is_main_thread_barrier_(local_heap->is_main_thread()),
       is_shared_heap_(heap_->IsShared()) {}
@@ -38,15 +40,18 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
   DCHECK(IsCurrentMarkingBarrier());
   if (MarkValue(host, value)) {
     if (is_compacting_ && slot.address()) {
-      collector_->RecordSlot(host, slot, value);
+      DCHECK(is_major());
+      major_collector_->RecordSlot(host, slot, value);
     }
   }
 }
 
 void MarkingBarrier::WriteWithoutHost(HeapObject value) {
   DCHECK(is_main_thread_barrier_);
+  if (is_minor() && !Heap::InYoungGeneration(value)) return;
+
   if (WhiteToGreyAndPush(value)) {
-    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+    if (V8_UNLIKELY(FLAG_track_retaining_path) && is_major()) {
       heap_->AddRetainingRoot(Root::kWriteBarrier, value);
     }
   }
@@ -56,10 +61,11 @@ void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
   DCHECK(IsCurrentMarkingBarrier());
   if (MarkValue(host, value)) {
     if (is_compacting_) {
+      DCHECK(is_major());
       if (is_main_thread_barrier_) {
         // An optimization to avoid allocating additional typed slots for the
         // main thread.
-        collector_->RecordRelocSlot(host, reloc_info, value);
+        major_collector_->RecordRelocSlot(host, reloc_info, value);
       } else {
         RecordRelocSlot(host, reloc_info, value);
       }
@@ -74,13 +80,22 @@ void MarkingBarrier::Write(JSArrayBuffer host,
     // The extension will be marked when the marker visits the host object.
     return;
   }
-  extension->Mark();
+  if (is_minor()) {
+    if (Heap::InYoungGeneration(host)) {
+      extension->YoungMark();
+    }
+  } else {
+    extension->Mark();
+  }
 }
 
 void MarkingBarrier::Write(DescriptorArray descriptor_array,
                            int number_of_own_descriptors) {
   DCHECK(IsCurrentMarkingBarrier());
   DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
+
+  if (is_minor() && !heap_->InYoungGeneration(descriptor_array)) return;
+
   // The DescriptorArray needs to be marked black here to ensure that slots are
   // recorded by the Scavenger in case the DescriptorArray is promoted while
   // incremental marking is running. This is needed as the regular marking
@@ -93,8 +108,14 @@ void MarkingBarrier::Write(DescriptorArray descriptor_array,
     MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
               descriptor_array.GetDescriptorSlot(0));
   }
-  const int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
-      collector_->epoch(), number_of_own_descriptors);
+
+  // Concurrent MinorMC always marks the full young generation DescriptorArray.
+  // We cannot use epoch like MajorMC does because only the lower 2 bits are
+  // used, and with many MinorMC cycles this could lead to correctness issues.
+  const int16_t old_marked =
+      is_minor() ? 0
+                 : descriptor_array.UpdateNumberOfMarkedDescriptors(
+                       major_collector_->epoch(), number_of_own_descriptors);
   if (old_marked < number_of_own_descriptors) {
     // This marks the range from [old_marked, number_of_own_descriptors) instead
     // of registering weak slots which may temporarily hold alive more objects
@@ -123,10 +144,13 @@ void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
 }
 
 // static
-void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
-  heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
-    local_heap->marking_barrier()->Activate(is_compacting);
-  });
+void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting,
+                                 MarkingBarrierType marking_barrier_type) {
+  heap->safepoint()->IterateLocalHeaps(
+      [is_compacting, marking_barrier_type](LocalHeap* local_heap) {
+        local_heap->marking_barrier()->Activate(is_compacting,
+                                                marking_barrier_type);
+      });
 }
 
 // static
@@ -144,7 +168,7 @@ void MarkingBarrier::PublishAll(Heap* heap) {
 
 void MarkingBarrier::Publish() {
   if (is_activated_) {
-    worklist_.Publish();
+    current_worklist_->Publish();
     base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
     if (!typed_slots_map_.empty()) {
       optional_rwx_write_scope.emplace(
@@ -200,7 +224,7 @@ void MarkingBarrier::Deactivate() {
     }
   }
   DCHECK(typed_slots_map_.empty());
-  DCHECK(worklist_.IsLocalEmpty());
+  DCHECK(current_worklist_->IsLocalEmpty());
 }
 
 void MarkingBarrier::ActivateSpace(PagedSpace* space) {
@@ -217,10 +241,14 @@ void MarkingBarrier::ActivateSpace(NewSpace* space) {
   }
 }
 
-void MarkingBarrier::Activate(bool is_compacting) {
+void MarkingBarrier::Activate(bool is_compacting,
+                              MarkingBarrierType marking_barrier_type) {
   DCHECK(!is_activated_);
-  DCHECK(worklist_.IsLocalEmpty());
+  DCHECK(major_worklist_.IsLocalEmpty());
+  DCHECK(minor_worklist_.IsLocalEmpty());
   is_compacting_ = is_compacting;
+  marking_barrier_type_ = marking_barrier_type;
+  current_worklist_ = is_minor() ? &minor_worklist_ : &major_worklist_;
   is_activated_ = true;
   if (is_main_thread_barrier_) {
     ActivateSpace(heap_->old_space());
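The epoch comment in Write(DescriptorArray, ...) above is worth unpacking with a number. Per the comment, UpdateNumberOfMarkedDescriptors keeps only the lower 2 bits of the collector epoch, so any two cycles whose epochs differ by a multiple of 4 are indistinguishable. Major GC cycles are infrequent enough for this to be safe, but back-to-back MinorMC cycles would hit the wraparound quickly, which is why the minor path bypasses the epoch check and always treats the array as fully unmarked (old_marked = 0). A self-contained illustration of the aliasing, in plain C++ with no V8 types:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  // A DescriptorArray-style field that keeps only the low 2 bits of the
  // epoch it was last marked in ("only the lower 2 bits" in the patch).
  uint32_t stored_epoch = 7 & 3;

  // Four GC cycles later the truncated epoch wraps around and collides:
  // "marked this cycle" looks identical to "marked four cycles ago", so
  // descriptors could wrongly be skipped as already marked.
  uint32_t current_epoch = 7 + 4;
  assert((current_epoch & 3) == stored_epoch);
}
```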
diff --git a/src/heap/marking-barrier.h b/src/heap/marking-barrier.h
index 5b7a25cd95..d9bbe0b9ed 100644
--- a/src/heap/marking-barrier.h
+++ b/src/heap/marking-barrier.h
@@ -18,16 +18,19 @@ class LocalHeap;
 class PagedSpace;
 class NewSpace;
 
+enum class MarkingBarrierType { kMinor, kMajor };
+
 class MarkingBarrier {
  public:
   explicit MarkingBarrier(LocalHeap*);
   ~MarkingBarrier();
 
-  void Activate(bool is_compacting);
+  void Activate(bool is_compacting, MarkingBarrierType marking_barrier_type);
   void Deactivate();
   void Publish();
 
-  static void ActivateAll(Heap* heap, bool is_compacting);
+  static void ActivateAll(Heap* heap, bool is_compacting,
+                          MarkingBarrierType marking_barrier_type);
   static void DeactivateAll(Heap* heap);
   V8_EXPORT_PRIVATE static void PublishAll(Heap* heap);
 
@@ -58,10 +61,20 @@ class MarkingBarrier {
   template <typename TSlot>
   inline void MarkRange(HeapObject value, TSlot start, TSlot end);
 
+  bool is_minor() const {
+    return marking_barrier_type_ == MarkingBarrierType::kMinor;
+  }
+  bool is_major() const {
+    return marking_barrier_type_ == MarkingBarrierType::kMajor;
+  }
+
   Heap* heap_;
-  MarkCompactCollector* collector_;
+  MarkCompactCollector* major_collector_;
+  MinorMarkCompactCollector* minor_collector_;
   IncrementalMarking* incremental_marking_;
-  MarkingWorklist::Local worklist_;
+  MarkingWorklist::Local major_worklist_;
+  MarkingWorklist::Local minor_worklist_;
+  MarkingWorklist::Local* current_worklist_;
   MarkingState marking_state_;
   std::unordered_map<MemoryChunk*, std::unique_ptr<TypedSlots>,
                      MemoryChunk::Hasher>
       typed_slots_map_;
@@ -70,6 +83,7 @@ class MarkingBarrier {
   bool is_activated_ = false;
   bool is_main_thread_barrier_;
   bool is_shared_heap_;
+  MarkingBarrierType marking_barrier_type_;
 };
 
 }  // namespace internal
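One design note on the Activate() changes: the minor/major decision is resolved once per activation by caching current_worklist_, so the hot WhiteToGreyAndPush path stays branch-free on the barrier type. Note also that IncrementalMarking::is_minor() is hard-coded to return false in this patch, so incremental marking still always activates a major barrier; the kMinor plumbing is groundwork for a MinorMC driver that is not part of this diff. A minimal model of the caching pattern, with invented names rather than V8 code:

```cpp
#include <cassert>
#include <vector>

enum class MarkingBarrierType { kMinor, kMajor };

class BarrierModel {
 public:
  void Activate(MarkingBarrierType type) {
    // Resolve the worklist once at activation, like the patched
    // MarkingBarrier::Activate does with current_worklist_.
    current_ = (type == MarkingBarrierType::kMinor) ? &minor_ : &major_;
    is_activated_ = true;
  }

  void Push(int obj) {  // hot path: no branch on the barrier type
    assert(is_activated_);
    current_->push_back(obj);
  }

  const std::vector<int>& minor() const { return minor_; }
  const std::vector<int>& major() const { return major_; }

 private:
  std::vector<int> major_;
  std::vector<int> minor_;
  std::vector<int>* current_ = nullptr;
  bool is_activated_ = false;
};

int main() {
  BarrierModel barrier;
  barrier.Activate(MarkingBarrierType::kMinor);
  barrier.Push(42);
  assert(barrier.minor().size() == 1 && barrier.major().empty());
}
```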