[heap] Adjust MarkingBarrier for MinorMC

This CL adjusts MarkingBarrier for MinorMC incremental marking.
The MarkingBarrier will be activated in a follow-up CL that schedules
MinorMC on a soft limit.

Bug: v8:13012
Change-Id: I525f6f158c2d55074d66f51925a1d8220cd4e9b9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3787874
Commit-Queue: Leon Bettscheider <bettscheider@google.com>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82655}
Leon Bettscheider 2022-08-23 12:03:17 +02:00 committed by V8 LUCI CQ
parent 3266dd845d
commit 772d2ba188
6 changed files with 87 additions and 29 deletions
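
The core of the change: MarkingBarrier now carries a MarkingBarrierType and keeps one
worklist per collector, routing barrier hits to whichever collector (MinorMC or MajorMC)
is currently marking. Below is a minimal standalone sketch of that dispatch, using
simplified stand-in types (Object, Worklist, Barrier) that are illustrative only and not
V8's real classes:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    enum class MarkingBarrierType { kMinor, kMajor };

    struct Object {
      std::uintptr_t address;
    };

    // Stand-in for MarkingWorklist::Local.
    struct Worklist {
      std::vector<Object> objects;
      void Push(const Object& o) { objects.push_back(o); }
      bool IsLocalEmpty() const { return objects.empty(); }
    };

    class Barrier {
     public:
      void Activate(bool is_compacting, MarkingBarrierType type) {
        is_compacting_ = is_compacting;
        type_ = type;
        // Route all subsequent pushes to the worklist of the active collector,
        // mirroring current_worklist_ in the hunks below.
        current_worklist_ = is_minor() ? &minor_worklist_ : &major_worklist_;
      }
      void Push(const Object& o) { current_worklist_->Push(o); }
      bool is_minor() const { return type_ == MarkingBarrierType::kMinor; }
      bool is_major() const { return type_ == MarkingBarrierType::kMajor; }

     private:
      Worklist major_worklist_;
      Worklist minor_worklist_;
      Worklist* current_worklist_ = nullptr;
      MarkingBarrierType type_ = MarkingBarrierType::kMajor;
      bool is_compacting_ = false;
    };

    int main() {
      Barrier barrier;
      // Major incremental marking keeps passing kMajor (see the
      // IncrementalMarking::StartMarking() hunk below); the follow-up CL that
      // starts MinorMC marking would presumably pass kMinor instead.
      barrier.Activate(/*is_compacting=*/false, MarkingBarrierType::kMajor);
      barrier.Push(Object{0x42});
      std::cout << (barrier.is_minor() ? "minor" : "major") << " barrier active\n";
      return 0;
    }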

@@ -278,7 +278,8 @@ void IncrementalMarking::StartMarking() {
   is_marking_ = true;
   heap_->SetIsMarkingFlag(true);
-  MarkingBarrier::ActivateAll(heap(), is_compacting_);
+  MarkingBarrier::ActivateAll(heap(), is_compacting_,
+                              MarkingBarrierType::kMajor);
   GlobalHandles::EnableMarkingBarrier(heap()->isolate());
   heap_->isolate()->compilation_cache()->MarkCompactPrologue();

@@ -156,6 +156,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking final {
   // Performs incremental marking step for unit tests.
   void AdvanceForTesting(double max_step_size_in_ms);
+  bool is_minor() const { return false; }
  private:
   class IncrementalMarkingRootMarkingVisitor;

@@ -63,8 +63,10 @@ LocalHeap::LocalHeap(Heap* heap, ThreadKind kind,
     if (!is_main_thread()) {
       WriteBarrier::SetForThread(marking_barrier_.get());
       if (heap_->incremental_marking()->IsMarking()) {
-        marking_barrier_->Activate(
-            heap_->incremental_marking()->IsCompacting());
+        marking_barrier_->Activate(heap_->incremental_marking()->IsCompacting(),
+                                   heap_->incremental_marking()->is_minor()
+                                       ? MarkingBarrierType::kMinor
+                                       : MarkingBarrierType::kMajor);
       }
     }
   });

@@ -30,12 +30,22 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
   }
   BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(value);
   if (is_shared_heap_ != target_page->InSharedHeap()) return false;
-  if (WhiteToGreyAndPush(value)) {
-    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
-      heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+  if (is_minor()) {
+    // We do not need to insert into RememberedSet<OLD_TO_NEW> here because the
+    // C++ marking barrier already does this for us.
+    if (Heap::InYoungGeneration(value)) {
+      WhiteToGreyAndPush(value);  // NEW->NEW
+    }
+    return false;
+  } else {
+    if (WhiteToGreyAndPush(value)) {
+      if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+        heap_->AddRetainingRoot(Root::kWriteBarrier, value);
+      }
     }
+    return true;
   }
-  return true;
 }
 template <typename TSlot>
@@ -47,7 +57,8 @@ inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
     // Mark both, weak and strong edges.
     if (object.GetHeapObject(isolate, &heap_object)) {
       if (MarkValue(host, heap_object) && is_compacting_) {
-        collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
+        DCHECK(is_major());
+        major_collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
       }
     }
   }
@@ -55,7 +66,7 @@ inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
 bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
   if (marking_state_.WhiteToGrey(obj)) {
-    worklist_.Push(obj);
+    current_worklist_->Push(obj);
     return true;
   }
   return false;
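
The MarkValue() split above is the behavioral heart of the CL: a minor barrier greys only
values that live in the young generation and always reports false, so callers such as
MarkRange() never attempt compaction slot recording (now guarded by DCHECK(is_major())).
A standalone sketch of that return-value contract, with simplified stand-in types that
are not V8's API:

    #include <cassert>

    enum class MarkingBarrierType { kMinor, kMajor };

    // Simplified stand-in; the real barrier consults the page header of the value.
    struct HeapObject {
      bool in_young_generation;
      bool grey;
    };

    // Returns true only if the caller may additionally need to record the slot
    // for compaction, mirroring the contract of MarkingBarrier::MarkValue().
    bool MarkValue(MarkingBarrierType type, HeapObject& value) {
      if (type == MarkingBarrierType::kMinor) {
        // Only NEW->NEW edges matter for MinorMC; old-to-new slots are already
        // recorded elsewhere (see the comment in the hunk above).
        if (value.in_young_generation) value.grey = true;
        return false;  // Never ask the caller to record a compaction slot.
      }
      value.grey = true;  // Major marking greys the target (V8 only greys white ones).
      return true;
    }

    int main() {
      HeapObject old_object{false, false};
      HeapObject young_object{true, false};
      // Minor barrier ignores old-generation targets entirely.
      assert(!MarkValue(MarkingBarrierType::kMinor, old_object) && !old_object.grey);
      // Minor barrier greys young targets but still returns false.
      assert(!MarkValue(MarkingBarrierType::kMinor, young_object) && young_object.grey);
      // Major barrier greys the target and lets the caller record slots.
      assert(MarkValue(MarkingBarrierType::kMajor, old_object) && old_object.grey);
      return 0;
    }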

@@ -24,9 +24,11 @@ namespace internal {
 MarkingBarrier::MarkingBarrier(LocalHeap* local_heap)
     : heap_(local_heap->heap()),
-      collector_(heap_->mark_compact_collector()),
+      major_collector_(heap_->mark_compact_collector()),
+      minor_collector_(heap_->minor_mark_compact_collector()),
       incremental_marking_(heap_->incremental_marking()),
-      worklist_(*collector_->marking_worklists()->shared()),
+      major_worklist_(*major_collector_->marking_worklists()->shared()),
+      minor_worklist_(*minor_collector_->marking_worklists()->shared()),
       marking_state_(heap_->isolate()),
       is_main_thread_barrier_(local_heap->is_main_thread()),
       is_shared_heap_(heap_->IsShared()) {}
@@ -38,15 +40,18 @@ void MarkingBarrier::Write(HeapObject host, HeapObjectSlot slot,
   DCHECK(IsCurrentMarkingBarrier());
   if (MarkValue(host, value)) {
     if (is_compacting_ && slot.address()) {
-      collector_->RecordSlot(host, slot, value);
+      DCHECK(is_major());
+      major_collector_->RecordSlot(host, slot, value);
     }
   }
 }
 void MarkingBarrier::WriteWithoutHost(HeapObject value) {
   DCHECK(is_main_thread_barrier_);
+  if (is_minor() && !Heap::InYoungGeneration(value)) return;
   if (WhiteToGreyAndPush(value)) {
-    if (V8_UNLIKELY(FLAG_track_retaining_path)) {
+    if (V8_UNLIKELY(FLAG_track_retaining_path) && is_major()) {
       heap_->AddRetainingRoot(Root::kWriteBarrier, value);
     }
   }
@@ -56,10 +61,11 @@ void MarkingBarrier::Write(Code host, RelocInfo* reloc_info, HeapObject value) {
   DCHECK(IsCurrentMarkingBarrier());
   if (MarkValue(host, value)) {
     if (is_compacting_) {
+      DCHECK(is_major());
       if (is_main_thread_barrier_) {
         // An optimization to avoid allocating additional typed slots for the
         // main thread.
-        collector_->RecordRelocSlot(host, reloc_info, value);
+        major_collector_->RecordRelocSlot(host, reloc_info, value);
       } else {
         RecordRelocSlot(host, reloc_info, value);
       }
@@ -74,13 +80,22 @@ void MarkingBarrier::Write(JSArrayBuffer host,
     // The extension will be marked when the marker visits the host object.
     return;
   }
-  extension->Mark();
+  if (is_minor()) {
+    if (Heap::InYoungGeneration(host)) {
+      extension->YoungMark();
+    }
+  } else {
+    extension->Mark();
+  }
 }
 void MarkingBarrier::Write(DescriptorArray descriptor_array,
                            int number_of_own_descriptors) {
   DCHECK(IsCurrentMarkingBarrier());
   DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
+  if (is_minor() && !heap_->InYoungGeneration(descriptor_array)) return;
   // The DescriptorArray needs to be marked black here to ensure that slots are
   // recorded by the Scavenger in case the DescriptorArray is promoted while
   // incremental marking is running. This is needed as the regular marking
@@ -93,8 +108,14 @@ void MarkingBarrier::Write(DescriptorArray descriptor_array,
     MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
               descriptor_array.GetDescriptorSlot(0));
   }
-  const int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
-      collector_->epoch(), number_of_own_descriptors);
+  // Concurrent MinorMC always marks the full young generation DescriptorArray.
+  // We cannot use epoch like MajorMC does because only the lower 2 bits are
+  // used, and with many MinorMC cycles this could lead to correctness issues.
+  const int16_t old_marked =
+      is_minor() ? 0
+                 : descriptor_array.UpdateNumberOfMarkedDescriptors(
+                       major_collector_->epoch(), number_of_own_descriptors);
   if (old_marked < number_of_own_descriptors) {
     // This marks the range from [old_marked, number_of_own_descriptors) instead
     // of registering weak slots which may temporarily hold alive more objects
@@ -123,10 +144,13 @@ void MarkingBarrier::RecordRelocSlot(Code host, RelocInfo* rinfo,
 }
 // static
-void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting) {
-  heap->safepoint()->IterateLocalHeaps([is_compacting](LocalHeap* local_heap) {
-    local_heap->marking_barrier()->Activate(is_compacting);
-  });
+void MarkingBarrier::ActivateAll(Heap* heap, bool is_compacting,
+                                 MarkingBarrierType marking_barrier_type) {
+  heap->safepoint()->IterateLocalHeaps(
+      [is_compacting, marking_barrier_type](LocalHeap* local_heap) {
+        local_heap->marking_barrier()->Activate(is_compacting,
+                                                marking_barrier_type);
+      });
 }
 // static
@@ -144,7 +168,7 @@ void MarkingBarrier::PublishAll(Heap* heap) {
 void MarkingBarrier::Publish() {
   if (is_activated_) {
-    worklist_.Publish();
+    current_worklist_->Publish();
     base::Optional<CodePageHeaderModificationScope> optional_rwx_write_scope;
     if (!typed_slots_map_.empty()) {
       optional_rwx_write_scope.emplace(
@@ -200,7 +224,7 @@ void MarkingBarrier::Deactivate() {
     }
   }
   DCHECK(typed_slots_map_.empty());
-  DCHECK(worklist_.IsLocalEmpty());
+  DCHECK(current_worklist_->IsLocalEmpty());
 }
 void MarkingBarrier::ActivateSpace(PagedSpace* space) {
@@ -217,10 +241,14 @@ void MarkingBarrier::ActivateSpace(NewSpace* space) {
   }
 }
-void MarkingBarrier::Activate(bool is_compacting) {
+void MarkingBarrier::Activate(bool is_compacting,
+                              MarkingBarrierType marking_barrier_type) {
   DCHECK(!is_activated_);
-  DCHECK(worklist_.IsLocalEmpty());
+  DCHECK(major_worklist_.IsLocalEmpty());
+  DCHECK(minor_worklist_.IsLocalEmpty());
   is_compacting_ = is_compacting;
+  marking_barrier_type_ = marking_barrier_type;
+  current_worklist_ = is_minor() ? &minor_worklist_ : &major_worklist_;
   is_activated_ = true;
   if (is_main_thread_barrier_) {
     ActivateSpace(heap_->old_space());
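
The DescriptorArray hunk above carries the subtlest reasoning in this CL: the
marked-descriptor count is tagged with MajorMC's 2-bit marking epoch, and because MinorMC
runs many more cycles, a wrapped epoch could make a stale count look current; the minor
barrier therefore treats old_marked as 0 and always (re)marks the full range. A tiny
standalone sketch of the wraparound (illustrative only, not V8's DescriptorArray
encoding):

    #include <iostream>

    int main() {
      // Only the low 2 bits of the marking epoch are stored next to the
      // marked-descriptor count, so the stored value repeats every 4 GC cycles.
      for (unsigned gc_cycle = 0; gc_cycle < 6; ++gc_cycle) {
        unsigned stored_epoch = gc_cycle & 0x3;
        std::cout << "cycle " << gc_cycle << " -> stored epoch " << stored_epoch << '\n';
      }
      // Cycle 4 stores epoch 0 again, so a count written in cycle 0 would look
      // current. With many short MinorMC cycles this is likely, which is why the
      // patch uses old_marked = 0 (remark everything) for minor marking instead
      // of the epoch check.
      return 0;
    }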

@@ -18,16 +18,19 @@ class LocalHeap;
 class PagedSpace;
 class NewSpace;
+enum class MarkingBarrierType { kMinor, kMajor };
 class MarkingBarrier {
  public:
   explicit MarkingBarrier(LocalHeap*);
   ~MarkingBarrier();
-  void Activate(bool is_compacting);
+  void Activate(bool is_compacting, MarkingBarrierType marking_barrier_type);
   void Deactivate();
   void Publish();
-  static void ActivateAll(Heap* heap, bool is_compacting);
+  static void ActivateAll(Heap* heap, bool is_compacting,
+                          MarkingBarrierType marking_barrier_type);
   static void DeactivateAll(Heap* heap);
   V8_EXPORT_PRIVATE static void PublishAll(Heap* heap);
@@ -58,10 +61,20 @@ class MarkingBarrier {
   template <typename TSlot>
   inline void MarkRange(HeapObject value, TSlot start, TSlot end);
+  bool is_minor() const {
+    return marking_barrier_type_ == MarkingBarrierType::kMinor;
+  }
+  bool is_major() const {
+    return marking_barrier_type_ == MarkingBarrierType::kMajor;
+  }
   Heap* heap_;
-  MarkCompactCollector* collector_;
+  MarkCompactCollector* major_collector_;
+  MinorMarkCompactCollector* minor_collector_;
   IncrementalMarking* incremental_marking_;
-  MarkingWorklist::Local worklist_;
+  MarkingWorklist::Local major_worklist_;
+  MarkingWorklist::Local minor_worklist_;
+  MarkingWorklist::Local* current_worklist_;
   MarkingState marking_state_;
   std::unordered_map<MemoryChunk*, std::unique_ptr<TypedSlots>,
                      MemoryChunk::Hasher>
@@ -70,6 +83,7 @@ class MarkingBarrier {
   bool is_activated_ = false;
   bool is_main_thread_barrier_;
   bool is_shared_heap_;
+  MarkingBarrierType marking_barrier_type_;
 };
 } // namespace internal