[heap] Add shared barrier to RecordWrite builtin

This CL adds the shared barrier to the RecordWrite builtin, which is
used from generated code in builtins, interpreted code and optimized
code.

The out-of-line part of the barrier now checks whether either the
POINTERS_TO_HERE_ARE_INTERESTING or the IN_SHARED_HEAP bit is set in
the value object's page flags.
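For illustration, a minimal sketch of that combined check (the bit
values and helper name below are invented, not V8's actual
definitions): ORing both flags into one mask lets the out-of-line
barrier test both conditions with a single branch, which is what the
new kPointersToHereAreInterestingOrInSharedHeapMask in the diffs below
enables.

  #include <cstdint>

  // Invented bit positions standing in for V8's real page flags.
  constexpr uintptr_t kPointersToHereAreInteresting = uintptr_t{1} << 0;
  constexpr uintptr_t kInSharedHeap = uintptr_t{1} << 1;

  // One mask, one branch: enter the out-of-line barrier if either bit
  // is set on the value object's page.
  constexpr uintptr_t kInterestingOrInSharedHeapMask =
      kPointersToHereAreInteresting | kInSharedHeap;

  inline bool ValueNeedsOutOfLineBarrier(uintptr_t value_page_flags) {
    return (value_page_flags & kInterestingOrInSharedHeapMask) != 0;
  }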

Outside of marking, the RecordWrite builtin now needs to check whether
to insert into the generational or the shared remembered set. Inserting
into the shared remembered set always calls into C++ code.
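A rough sketch of that dispatch (the stand-in flag bits and stubs below
are invented; the real logic is GenerationalOrSharedBarrierSlow in the
first diff):

  #include <cstdint>
  #include <cstdio>

  // Invented page-flag bit; V8 reads these from the page header.
  constexpr uintptr_t kIsInYoungGeneration = uintptr_t{1} << 2;

  // Stub standing in for an inline RememberedSet<OLD_TO_NEW> insert.
  void InsertOldToNew(void* slot) { std::printf("old-to-new %p\n", slot); }
  // Stub standing in for the C++ call (WriteBarrier::SharedFromCode here).
  void SharedFromCodeStub(void* slot) { std::printf("old-to-shared %p\n", slot); }

  // Outside of marking, the fast paths have already established that
  // the value is either young or in the shared heap, so a two-way
  // dispatch remains: young values feed the generational remembered
  // set, shared values always go through the C++ slow path.
  void GenerationalOrSharedBarrierSlowSketch(uintptr_t value_page_flags,
                                             void* slot) {
    if (value_page_flags & kIsInYoungGeneration) {
      InsertOldToNew(slot);
    } else {
      SharedFromCodeStub(slot);
    }
  }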

During marking, the RecordWrite builtin also needs to check whether
the store created an old-to-shared pointer.
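Sketched the same way, reusing the invented bits and stubs from the
previous snippet (compare GenerationalOrSharedBarrierDuringMarking in
the first diff): during marking every store reaches the slow path, so
the builtin itself must filter out young hosts and then classify the
value before the marking barrier runs.

  constexpr uintptr_t kInSharedHeapBit = uintptr_t{1} << 1;

  void InsertOldToNew(void* slot);      // from the previous sketch
  void SharedFromCodeStub(void* slot);  // from the previous sketch
  void MarkingBarrier(void* slot);      // invented stand-in

  void WriteBarrierDuringMarkingSketch(uintptr_t host_page_flags,
                                       uintptr_t value_page_flags,
                                       void* slot) {
    // A young host never needs an old-to-new or old-to-shared entry.
    if (!(host_page_flags & kIsInYoungGeneration)) {
      if (value_page_flags & kIsInYoungGeneration) {
        InsertOldToNew(slot);          // old-to-new
      } else if (value_page_flags & kInSharedHeapBit) {
        SharedFromCodeStub(slot);      // old-to-shared
      }
    }
    MarkingBarrier(slot);              // marking barrier runs regardless
  }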

Bug: v8:11708
Change-Id: Iaca4c3c0650aece4326936d7d63754a23cd0a028
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3779679
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Jakob Linke <jgruber@chromium.org>
Commit-Queue: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/main@{#82375}
12 changed files with 114 additions and 47 deletions


@@ -246,13 +246,7 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     Branch(IsMarking(), &marking_is_on, &marking_is_off);

     BIND(&marking_is_off);
-    // When incremental marking is not on, we skip cross generation pointer
-    // checking here, because there are checks for
-    // `kPointersFromHereAreInterestingMask` and
-    // `kPointersToHereAreInterestingMask` in
-    // `src/compiler/<arch>/code-generator-<arch>.cc` before calling this
-    // stub, which serves as the cross generation checking.
-    GenerationalBarrierSlow(slot, &next, fp_mode);
+    GenerationalOrSharedBarrierSlow(slot, &next, fp_mode);

     BIND(&marking_is_on);
     WriteBarrierDuringMarking(slot, &next, fp_mode);
@@ -260,6 +254,27 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     BIND(&next);
   }

+  void GenerationalOrSharedBarrierSlow(TNode<IntPtrT> slot, Label* next,
+                                       SaveFPRegsMode fp_mode) {
+    // When incremental marking is not on, the fast and out-of-line fast
+    // paths of the write barrier already checked whether we need to run
+    // the generational or shared barrier slow path.
+    Label generational_barrier(this), shared_barrier(this);
+
+    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
+    InYoungGeneration(value, &generational_barrier, &shared_barrier);
+
+    BIND(&generational_barrier);
+    CSA_DCHECK(this,
+               IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask));
+    GenerationalBarrierSlow(slot, next, fp_mode);
+
+    BIND(&shared_barrier);
+    CSA_DCHECK(this, IsPageFlagSet(value, MemoryChunk::kInSharedHeap));
+    SharedBarrierSlow(slot, next, fp_mode);
+  }
+
   void GenerationalBarrierSlow(TNode<IntPtrT> slot, Label* next,
                                SaveFPRegsMode fp_mode) {
     TNode<IntPtrT> object = BitcastTaggedToWord(
@@ -268,16 +283,27 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     Goto(next);
   }

+  void SharedBarrierSlow(TNode<IntPtrT> slot, Label* next,
+                         SaveFPRegsMode fp_mode) {
+    TNode<ExternalReference> function = ExternalConstant(
+        ExternalReference::shared_barrier_from_code_function());
+    TNode<IntPtrT> object = BitcastTaggedToWord(
+        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+    CallCFunctionWithCallerSavedRegisters(
+        function, MachineTypeOf<Int32T>::value, fp_mode,
+        std::make_pair(MachineTypeOf<IntPtrT>::value, object),
+        std::make_pair(MachineTypeOf<IntPtrT>::value, slot));
+    Goto(next);
+  }
+
   void WriteBarrierDuringMarking(TNode<IntPtrT> slot, Label* next,
                                  SaveFPRegsMode fp_mode) {
-    // When incremental marking is on, we need to perform generational and
-    // incremental marking write barrier.
+    // When incremental marking is on, we need to perform the generational,
+    // shared and incremental marking write barriers.
     Label incremental_barrier(this);

-    // During incremental marking we always reach this slow path, so we need to
-    // check whether this is a old-to-new reference before calling into the
-    // generational barrier slow path.
-    GenerationalBarrier(slot, &incremental_barrier, fp_mode);
+    GenerationalOrSharedBarrierDuringMarking(slot, &incremental_barrier,
+                                             fp_mode);

     BIND(&incremental_barrier);
     TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
@@ -285,32 +311,50 @@ class WriteBarrierCodeStubAssembler : public CodeStubAssembler {
     Goto(next);
   }

-  void GenerationalBarrier(TNode<IntPtrT> slot, Label* next,
-                           SaveFPRegsMode fp_mode) {
-    Label generational_barrier_slow(this);
+  void GenerationalOrSharedBarrierDuringMarking(TNode<IntPtrT> slot,
+                                                Label* next,
+                                                SaveFPRegsMode fp_mode) {
+    Label generational_barrier_check(this), shared_barrier_check(this),
+        shared_barrier_slow(this), generational_barrier_slow(this);

-    IsGenerationalBarrierNeeded(slot, &generational_barrier_slow, next);
+    // During incremental marking we always reach this slow path, so we need
+    // to check whether this is an old-to-new or old-to-shared reference.
+    TNode<IntPtrT> object = BitcastTaggedToWord(
+        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+    InYoungGeneration(object, next, &generational_barrier_check);
+
+    BIND(&generational_barrier_check);
+    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
+    InYoungGeneration(value, &generational_barrier_slow,
+                      &shared_barrier_check);

     BIND(&generational_barrier_slow);
     GenerationalBarrierSlow(slot, next, fp_mode);
+
+    BIND(&shared_barrier_check);
+    InSharedHeap(value, &shared_barrier_slow, next);
+
+    BIND(&shared_barrier_slow);
+    SharedBarrierSlow(slot, next, fp_mode);
   }

-  void IsGenerationalBarrierNeeded(TNode<IntPtrT> slot, Label* true_label,
-                                   Label* false_label) {
-    // TODO(ishell): do a new-space range check instead.
-    TNode<IntPtrT> value = BitcastTaggedToWord(Load<HeapObject>(slot));
-
-    // TODO(albertnetymk): Try to cache the page flag for value and
-    // object, instead of calling IsPageFlagSet each time.
-    TNode<BoolT> value_is_young =
-        IsPageFlagSet(value, MemoryChunk::kIsInYoungGenerationMask);
-    GotoIfNot(value_is_young, false_label);
-
-    TNode<IntPtrT> object = BitcastTaggedToWord(
-        UncheckedParameter<Object>(WriteBarrierDescriptor::kObject));
+  void InYoungGeneration(TNode<IntPtrT> object, Label* true_label,
+                         Label* false_label) {
     TNode<BoolT> object_is_young =
         IsPageFlagSet(object, MemoryChunk::kIsInYoungGenerationMask);
-    Branch(object_is_young, false_label, true_label);
+    Branch(object_is_young, true_label, false_label);
+  }
+
+  void InSharedHeap(TNode<IntPtrT> object, Label* true_label,
+                    Label* false_label) {
+    TNode<BoolT> object_is_in_shared_heap =
+        IsPageFlagSet(object, MemoryChunk::kInSharedHeap);
+    Branch(object_is_in_shared_heap, true_label, false_label);
+  }

   void IncrementalWriteBarrier(TNode<IntPtrT> slot, TNode<IntPtrT> value,


@@ -338,6 +338,9 @@ struct IsValidExternalReferenceType<Result (Class::*)(Args...)> {
 FUNCTION_REFERENCE(write_barrier_marking_from_code_function,
                    WriteBarrier::MarkingFromCode)

+FUNCTION_REFERENCE(shared_barrier_from_code_function,
+                   WriteBarrier::SharedFromCode)
+
 FUNCTION_REFERENCE(insert_remembered_set_function,
                    Heap::InsertIntoRememberedSetFromCode)


@@ -263,6 +263,7 @@ class StatsCounter;
   V(address_of_wasm_int32_overflow_as_float, "wasm_int32_overflow_as_float") \
   V(supports_cetss_address, "CpuFeatures::supports_cetss_address")           \
   V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
+  V(shared_barrier_from_code_function, "WriteBarrier::SharedFromCode")       \
   V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
   V(call_enter_context_function, "call_enter_context_function")              \
   V(atomic_pair_load_function, "atomic_pair_load_function")                  \


@@ -196,8 +196,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   }

   void Generate() final {
-    __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
-                     exit());
+    __ CheckPageFlag(
+        value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
+        eq, exit());
     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                             ? SaveFPRegsMode::kSave
                                             : SaveFPRegsMode::kIgnore;


@@ -288,8 +288,9 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     if (COMPRESS_POINTERS_BOOL) {
       __ DecompressTaggedPointer(value_, value_);
     }
-    __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, ne,
-                     exit());
+    __ CheckPageFlag(
+        value_, MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask,
+        ne, exit());
     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                             ? SaveFPRegsMode::kSave
                                             : SaveFPRegsMode::kIgnore;


@@ -321,9 +321,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
   }

   void Generate() final {
-    __ CheckPageFlag(value_, scratch0_,
-                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                     exit());
+    __ CheckPageFlag(
+        value_, scratch0_,
+        MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
+        exit());
     __ lea(scratch1_, operand_);
     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
                                             ? SaveFPRegsMode::kSave


@@ -295,9 +295,10 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
     if (COMPRESS_POINTERS_BOOL) {
       __ DecompressTaggedPointer(value_, value_);
     }
-    __ CheckPageFlag(value_, scratch0_,
-                     MemoryChunk::kPointersToHereAreInterestingMask, zero,
-                     exit());
+    __ CheckPageFlag(
+        value_, scratch0_,
+        MemoryChunk::kPointersToHereAreInterestingOrInSharedHeapMask, zero,
+        exit());
     __ leaq(scratch1_, operand_);
     SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()


@@ -123,6 +123,11 @@ class BasicMemoryChunk {
   static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
       POINTERS_TO_HERE_ARE_INTERESTING;

+  static constexpr MainThreadFlags
+      kPointersToHereAreInterestingOrInSharedHeapMask =
+          MainThreadFlags(POINTERS_TO_HERE_ARE_INTERESTING) |
+          MainThreadFlags(IN_SHARED_HEAP);
+
   static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
       POINTERS_FROM_HERE_ARE_INTERESTING;

@@ -134,6 +139,8 @@ class BasicMemoryChunk {
   static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;

+  static constexpr MainThreadFlags kInSharedHeap = IN_SHARED_HEAP;
+
   static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
       MainThreadFlags(kEvacuationCandidateMask) |
       MainThreadFlags(kIsInYoungGenerationMask);


@@ -101,6 +101,14 @@ int WriteBarrier::MarkingFromCode(Address raw_host, Address raw_slot) {
   return 0;
 }

+int WriteBarrier::SharedFromCode(Address raw_host, Address raw_slot) {
+  HeapObject host = HeapObject::cast(Object(raw_host));
+  Heap::SharedHeapBarrierSlow(host, raw_slot);
+
+  // Called by WriteBarrierCodeStubAssembler, which doesn't accept void type.
+  return 0;
+}
+
 #ifdef ENABLE_SLOW_DCHECKS

 bool WriteBarrier::IsImmortalImmovableHeapObject(HeapObject object) {
   BasicMemoryChunk* basic_chunk = BasicMemoryChunk::FromHeapObject(object);


@@ -62,6 +62,8 @@ class V8_EXPORT_PRIVATE WriteBarrier {
   // It is invoked from generated code and has to take raw addresses.
   static int MarkingFromCode(Address raw_host, Address raw_slot);
+  static int SharedFromCode(Address raw_host, Address raw_slot);
+
   // Invoked from global handles where no host object is available.
   static inline void MarkingFromGlobalHandle(Object value);
   static inline void MarkingFromInternalFields(JSObject host);


@@ -7309,7 +7309,7 @@ void Heap::CombinedGenerationalAndSharedBarrierSlow(HeapObject object,
     heap_internals::MemoryChunk* object_chunk =
         heap_internals::MemoryChunk::FromHeapObject(object);
     if (!object_chunk->InSharedHeap())
-      Heap::SharedHeapBarrierSlow(object, slot, value);
+      Heap::SharedHeapBarrierSlow(object, slot);
   }
 }

@@ -7327,7 +7327,7 @@ void Heap::CombinedGenerationalAndSharedEphemeronBarrierSlow(
     heap_internals::MemoryChunk* table_chunk =
         heap_internals::MemoryChunk::FromHeapObject(table);
     if (!table_chunk->InSharedHeap()) {
-      Heap::SharedHeapBarrierSlow(table, slot, value);
+      Heap::SharedHeapBarrierSlow(table, slot);
     }
   }
 }

@@ -7338,8 +7338,7 @@ void Heap::GenerationalBarrierSlow(HeapObject object, Address slot,
   RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
 }

-void Heap::SharedHeapBarrierSlow(HeapObject object, Address slot,
-                                 HeapObject value) {
+void Heap::SharedHeapBarrierSlow(HeapObject object, Address slot) {
   MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
   RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::ATOMIC>(chunk, slot);
 }


@@ -512,8 +512,7 @@ class Heap {
                                                        HeapObject value);
   V8_EXPORT_PRIVATE static void SharedHeapBarrierSlow(HeapObject object,
-                                                      Address slot,
-                                                      HeapObject value);
+                                                      Address slot);
   V8_EXPORT_PRIVATE inline void RecordEphemeronKeyWrite(
       EphemeronHashTable table, Address key_slot);
   V8_EXPORT_PRIVATE static void EphemeronKeyWriteBarrierFromCode(