heap: Allow concurrent DescriptorArray write barrier

The barrier checks whether the DescriptorArray has already been marked
and, if so, strongifies all of its values. This means that
DescriptorArray elements will not be reclaimed within the current
garbage collection cycle if a write barrier triggers for the array.

Bug: v8:12133
Change-Id: I33df2f75d75527034a040275b6c55ac0aed94321
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3158325
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#76906}
Author: Michael Lippautz <mlippautz@chromium.org>
Date: 2021-09-17 14:15:53 +02:00
Committed-by: V8 LUCI CQ
parent cde2a6c72e
commit 656b1d64ea
6 files changed, 44 insertions(+), 29 deletions(-)
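
For orientation before the diffs: the change relies on each DescriptorArray keeping a count of already-marked descriptors, tagged with the GC epoch and updated atomically so that concurrent write barriers agree on it (see DescriptorArray::UpdateNumberOfMarkedDescriptors in the marking-barrier.cc hunk below). A minimal, self-contained model of that bookkeeping, assuming an invented bit layout and names (DescriptorArrayModel, Encode, Decode are not V8 API):

#include <algorithm>
#include <atomic>
#include <cstdint>

// Assumed encoding: low 2 bits hold the GC epoch, the rest the marked count.
constexpr unsigned kEpochMask = 3;

int16_t Encode(unsigned epoch, int16_t marked) {
  return static_cast<int16_t>((marked << 2) | (epoch & kEpochMask));
}

int16_t Decode(unsigned epoch, int16_t raw) {
  // A count tagged with a stale epoch means "nothing marked yet this cycle".
  if ((raw & kEpochMask) != (epoch & kEpochMask)) return 0;
  return static_cast<int16_t>(raw >> 2);
}

struct DescriptorArrayModel {
  std::atomic<int16_t> raw_marked{0};

  // Raises the marked count for `epoch` to at least `marked` and returns the
  // previous count; the CAS loop keeps the maximum correct under concurrent
  // write barriers.
  int16_t UpdateNumberOfMarkedDescriptors(unsigned epoch, int16_t marked) {
    int16_t old_raw = raw_marked.load(std::memory_order_relaxed);
    while (true) {
      const int16_t old_marked = Decode(epoch, old_raw);
      const int16_t new_raw = Encode(epoch, std::max(old_marked, marked));
      if (raw_marked.compare_exchange_weak(old_raw, new_raw,
                                           std::memory_order_relaxed)) {
        return old_marked;
      }
    }
  }
};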

--- a/src/heap/mark-compact-inl.h
+++ b/src/heap/mark-compact-inl.h

@@ -127,16 +127,6 @@ void MainMarkingVisitor<MarkingState>::RecordRelocSlot(Code host,
   MarkCompactCollector::RecordRelocSlot(host, rinfo, target);
 }
 
-template <typename MarkingState>
-void MainMarkingVisitor<MarkingState>::MarkDescriptorArrayFromWriteBarrier(
-    DescriptorArray descriptors, int number_of_own_descriptors) {
-  // This is necessary because the Scavenger records slots only for the
-  // promoted black objects and the marking visitor of DescriptorArray skips
-  // the descriptors marked by the visitor.VisitDescriptors() below.
-  this->MarkDescriptorArrayBlack(descriptors);
-  this->VisitDescriptors(descriptors, number_of_own_descriptors);
-}
-
 template <LiveObjectIterationMode mode>
 LiveObjectRange<mode>::iterator::iterator(const MemoryChunk* chunk,
                                           Bitmap* bitmap, Address start)

--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc

@@ -1717,12 +1717,6 @@ void MarkCompactCollector::RevisitObject(HeapObject obj) {
   marking_visitor_->Visit(obj.map(), obj);
 }
 
-void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
-    DescriptorArray descriptors, int number_of_own_descriptors) {
-  marking_visitor_->MarkDescriptorArrayFromWriteBarrier(
-      descriptors, number_of_own_descriptors);
-}
-
 void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
   bool work_to_do = true;
   int iterations = 0;

--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h

@@ -391,9 +391,6 @@ class MainMarkingVisitor final
     V8_UNLIKELY(revisiting_object_);
   }
 
-  void MarkDescriptorArrayFromWriteBarrier(DescriptorArray descriptors,
-                                           int number_of_own_descriptors);
-
  private:
   // Functions required by MarkingVisitorBase.
@@ -582,10 +579,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   void VisitObject(HeapObject obj);
   // Used by incremental marking for black-allocated objects.
   void RevisitObject(HeapObject obj);
 
-  // Ensures that all descriptors int range [0, number_of_own_descripts)
-  // are visited.
-  void MarkDescriptorArrayFromWriteBarrier(DescriptorArray array,
-                                           int number_of_own_descriptors);
 
   // Drains the main thread marking worklist until the specified number of
   // bytes are processed. If the number of bytes is zero, then the worklist

--- a/src/heap/marking-barrier-inl.h
+++ b/src/heap/marking-barrier-inl.h

@@ -40,6 +40,21 @@ bool MarkingBarrier::MarkValue(HeapObject host, HeapObject value) {
   return true;
 }
 
+template <typename TSlot>
+inline void MarkingBarrier::MarkRange(HeapObject host, TSlot start, TSlot end) {
+  auto* isolate = heap_->isolate();
+  for (TSlot slot = start; slot < end; ++slot) {
+    typename TSlot::TObject object = slot.Relaxed_Load();
+    HeapObject heap_object;
+    // Mark both, weak and strong edges.
+    if (object.GetHeapObject(isolate, &heap_object)) {
+      if (MarkValue(host, heap_object) && is_compacting_) {
+        collector_->RecordSlot(host, HeapObjectSlot(slot), heap_object);
+      }
+    }
+  }
+}
+
 bool MarkingBarrier::WhiteToGreyAndPush(HeapObject obj) {
   if (marking_state_.WhiteToGrey(obj)) {
     worklist_.Push(obj);
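
Two details of MarkRange are worth noting: slots are read with Relaxed_Load() because the barrier can now run while other threads access the array, and GetHeapObject() succeeds for strong and weak slot contents alike, so weak edges get marked ("strongified") exactly like strong ones. A simplified, self-contained model of that slot walk, using invented types rather than V8's slot machinery:

#include <cstddef>
#include <vector>

struct Object {
  bool marked = false;
};

// Model of a tagged slot: empty (e.g. a Smi), or a strong or weak pointer.
struct Slot {
  Object* target = nullptr;
  bool is_weak = false;

  // Like GetHeapObject() above: yields the referent for strong *and* weak
  // pointers; the caller treats both the same.
  bool GetHeapObject(Object** out) const {
    if (target == nullptr) return false;
    *out = target;
    return true;
  }
};

// The essence of the walk: every referenced object is marked, so weakly
// referenced descriptors also survive the current GC cycle.
void MarkRangeModel(std::vector<Slot>& slots, size_t start, size_t end) {
  for (size_t i = start; i < end; ++i) {
    Object* obj = nullptr;
    if (slots[i].GetHeapObject(&obj)) obj->marked = true;
  }
}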

--- a/src/heap/marking-barrier.cc
+++ b/src/heap/marking-barrier.cc

@@ -4,6 +4,7 @@
 
 #include "src/heap/marking-barrier.h"
 
+#include "src/base/logging.h"
 #include "src/heap/heap-inl.h"
 #include "src/heap/heap-write-barrier.h"
 #include "src/heap/heap.h"
@@ -15,6 +16,7 @@
 #include "src/heap/marking-worklist-inl.h"
 #include "src/heap/marking-worklist.h"
 #include "src/heap/safepoint.h"
+#include "src/objects/heap-object.h"
 #include "src/objects/js-array-buffer.h"
 
 namespace v8 {
@@ -74,12 +76,30 @@ void MarkingBarrier::Write(JSArrayBuffer host,
 
 void MarkingBarrier::Write(DescriptorArray descriptor_array,
                            int number_of_own_descriptors) {
   DCHECK(IsCurrentMarkingBarrier());
-  DCHECK(is_main_thread_barrier_);
-  int16_t raw_marked = descriptor_array.raw_number_of_marked_descriptors();
-  if (NumberOfMarkedDescriptors::decode(collector_->epoch(), raw_marked) <
-      number_of_own_descriptors) {
-    collector_->MarkDescriptorArrayFromWriteBarrier(descriptor_array,
-                                                    number_of_own_descriptors);
-  }
+  DCHECK(IsReadOnlyHeapObject(descriptor_array.map()));
+  // The DescriptorArray needs to be marked black here to ensure that slots are
+  // recorded by the Scavenger in case the DescriptorArray is promoted while
+  // incremental marking is running. This is needed as the regular marking
+  // visitor does not re-process any already marked descriptors. If we don't
+  // mark it black here, the Scavenger may promote a DescriptorArray and any
+  // already marked descriptors will not have any slots recorded.
+  if (!marking_state_.IsBlack(descriptor_array)) {
+    marking_state_.WhiteToGrey(descriptor_array);
+    marking_state_.GreyToBlack(descriptor_array);
+    MarkRange(descriptor_array, descriptor_array.GetFirstPointerSlot(),
+              descriptor_array.GetDescriptorSlot(0));
+  }
+  const int16_t old_marked = descriptor_array.UpdateNumberOfMarkedDescriptors(
+      collector_->epoch(), number_of_own_descriptors);
+  if (old_marked < number_of_own_descriptors) {
+    // This marks the range from [old_marked, number_of_own_descriptors)
+    // instead of registering weak slots which may temporarily hold alive more
+    // objects for the current GC cycle. Weakness is not needed for actual
+    // trimming, see `MarkCompactCollector::TrimDescriptorArray()`.
+    MarkRange(descriptor_array,
+              MaybeObjectSlot(descriptor_array.GetDescriptorSlot(old_marked)),
+              MaybeObjectSlot(descriptor_array.GetDescriptorSlot(
+                  number_of_own_descriptors)));
+  }
 }
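
Taken together, the rewritten barrier does bounded work per call: the black-marking branch runs at most once per array and cycle, and the second branch only visits the descriptor suffix [old_marked, number_of_own_descriptors) that no earlier barrier or marking visitor has covered. A hedged sketch of that flow, reusing the hypothetical DescriptorArrayModel from the sketch above (MarkDescriptorStrongly is likewise invented):

// Invented stand-in for MarkRange over one descriptor's slots.
void MarkDescriptorStrongly(DescriptorArrayModel& /*array*/, int16_t /*i*/) {}

void DescriptorArrayWriteBarrierModel(DescriptorArrayModel& array,
                                      unsigned epoch,
                                      int16_t number_of_own_descriptors) {
  const int16_t old_marked =
      array.UpdateNumberOfMarkedDescriptors(epoch, number_of_own_descriptors);
  // Repeated calls for the same array in the same epoch observe old_marked ==
  // number_of_own_descriptors and skip the loop entirely.
  for (int16_t i = old_marked; i < number_of_own_descriptors; ++i) {
    MarkDescriptorStrongly(array, i);
  }
}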

--- a/src/heap/marking-barrier.h
+++ b/src/heap/marking-barrier.h

@@ -55,6 +55,9 @@ class MarkingBarrier {
   bool IsCurrentMarkingBarrier();
 
+  template <typename TSlot>
+  inline void MarkRange(HeapObject value, TSlot start, TSlot end);
+
   Heap* heap_;
   MarkCompactCollector* collector_;
   IncrementalMarking* incremental_marking_;