cppgc: Avoid dispatching write barrier during atomic pause

This change avoids dispatching a write barrier during the atomic pause.
The dispatch can generally be triggered through pre-finalizers.

In the future, further checks may be added to avoid misuse of
pre-finalizers.

Bug: chromium:1056170, chromium:1175560
Change-Id: I119e18372633b2375f60e17b4c881f68bb20bf66
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2679685
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72560}
Michael Lippautz 2021-02-08 12:36:00 +01:00 committed by Commit Bot
parent 0c8b6e415c
commit c7ff90d97d
9 changed files with 128 additions and 42 deletions
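For illustration, here is a minimal sketch (not part of this CL; the class names, the `example` namespace, and the list setup are hypothetical) of the scenario the change addresses: a pre-finalizer runs during the atomic pause and rewires `Member<>` fields, and each such assignment dispatches the write barrier.

// Hedged sketch (hypothetical classes, not from this CL): a pre-finalizer
// mutating the object graph while the heap is still in the atomic pause.
#include "cppgc/garbage-collected.h"
#include "cppgc/member.h"
#include "cppgc/prefinalizer.h"
#include "cppgc/visitor.h"

namespace example {

class Node final : public cppgc::GarbageCollected<Node> {
 public:
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(next_); }
  cppgc::Member<Node> next_;
};

class GraphRewiringPrefinalizer final
    : public cppgc::GarbageCollected<GraphRewiringPrefinalizer> {
  CPPGC_USING_PRE_FINALIZER(GraphRewiringPrefinalizer, Dispose);

 public:
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(head_); }

  void Dispose() {
    // Runs during the atomic pause, after marking has finished. Assigning a
    // non-null value to a Member<> dispatches the Dijkstra write barrier.
    // Assumes head_ points to a list with at least two successors.
    head_->next_ = head_->next_->next_;
  }

  cppgc::Member<Node> head_;
};

}  // namespace example

With this change, the caged-heap dispatch keys off `is_incremental_marking_in_progress` (false during the atomic pause), `marker_` is reset before pre-finalizers are invoked, and the generational barrier bails out via `in_atomic_pause()`, so an assignment like the one above resolves to `WriteBarrier::Type::kNone` instead of marking.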


@@ -55,7 +55,7 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
struct CagedHeapLocalData final {
explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {}
bool is_marking_in_progress = false;
bool is_incremental_marking_in_progress = false;
HeapBase* heap_base = nullptr;
#if defined(CPPGC_YOUNG_GENERATION)
AgeTable age_table;


@@ -138,7 +138,7 @@ class V8_EXPORT WriteBarrierTypeForCagedHeapPolicy final {
if (!TryGetCagedHeap(value, value, params)) {
return WriteBarrier::Type::kNone;
}
if (V8_UNLIKELY(params.caged_heap().is_marking_in_progress)) {
if (V8_UNLIKELY(params.caged_heap().is_incremental_marking_in_progress)) {
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
return SetAndReturnType<WriteBarrier::Type::kNone>(params);
@@ -182,7 +182,7 @@ struct WriteBarrierTypeForCagedHeapPolicy::ValueModeDispatch<
if (!within_cage) {
return WriteBarrier::Type::kNone;
}
if (V8_LIKELY(!params.caged_heap().is_marking_in_progress)) {
if (V8_LIKELY(!params.caged_heap().is_incremental_marking_in_progress)) {
#if defined(CPPGC_YOUNG_GENERATION)
params.heap = reinterpret_cast<HeapHandle*>(params.start);
params.slot_offset = reinterpret_cast<uintptr_t>(slot) - params.start;
@@ -246,28 +246,19 @@ class V8_EXPORT WriteBarrierTypeForNonCagedHeapPolicy final {
static V8_INLINE WriteBarrier::Type GetForExternallyReferenced(
const void* value, WriteBarrier::Params& params,
HeapHandleCallback callback) {
return GetInternal(params, callback);
// The slot will never be used in `Get()` below.
return Get<WriteBarrier::ValueMode::kValuePresent>(nullptr, value, params,
callback);
}
private:
template <WriteBarrier::ValueMode value_mode>
struct ValueModeDispatch;
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type GetInternal(WriteBarrier::Params& params,
HeapHandleCallback callback) {
if (V8_UNLIKELY(ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) {
HeapHandle& handle = callback();
if (subtle::HeapState::IsMarking(handle)) {
params.heap = &handle;
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
}
return WriteBarrier::Type::kNone;
}
// TODO(chromium:1056170): Create fast path on API.
static bool IsMarking(const void*, HeapHandle**);
// TODO(chromium:1056170): Create fast path on API.
static bool IsMarking(HeapHandle&);
WriteBarrierTypeForNonCagedHeapPolicy() = delete;
};
@ -294,10 +285,17 @@ template <>
struct WriteBarrierTypeForNonCagedHeapPolicy::ValueModeDispatch<
WriteBarrier::ValueMode::kNoValuePresent> {
template <typename HeapHandleCallback>
static V8_INLINE WriteBarrier::Type Get(const void* slot, const void*,
static V8_INLINE WriteBarrier::Type Get(const void*, const void*,
WriteBarrier::Params& params,
HeapHandleCallback callback) {
return GetInternal(params, callback);
if (V8_UNLIKELY(ProcessHeap::IsAnyIncrementalOrConcurrentMarking())) {
HeapHandle& handle = callback();
if (IsMarking(handle)) {
params.heap = &handle;
return SetAndReturnType<WriteBarrier::Type::kMarking>(params);
}
}
return WriteBarrier::Type::kNone;
}
};


@@ -280,11 +280,11 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
marker_->LeaveAtomicPause();
}
marker_.reset();
{
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(*this);
prefinalizer_handler()->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
UnifiedHeapMarkingVerifier verifier(*this);


@@ -183,12 +183,12 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
marker_->FinishMarking(config_.stack_state);
}
marker_.reset();
{
// Pre finalizers are forbidden from allocating objects.
cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
prefinalizer_handler_->InvokePreFinalizers();
}
marker_.reset();
// TODO(chromium:1056170): replace build flag with dedicated flag.
#if DEBUG
MarkingVerifier verifier(*this);


@@ -35,7 +35,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
ProcessHeap::EnterIncrementalOrConcurrentMarking();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_marking_in_progress = true;
heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
#endif
return true;
}
@@ -49,7 +49,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
Marker::MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
ProcessHeap::ExitIncrementalOrConcurrentMarking();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_marking_in_progress = false;
heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
#endif
return true;
}


@@ -28,7 +28,7 @@ void ProcessMarkValue(HeapObjectHeader& header, MarkerBase* marker,
DCHECK(reinterpret_cast<CagedHeapLocalData*>(
reinterpret_cast<uintptr_t>(value) &
~(kCagedHeapReservationAlignment - 1))
->is_marking_in_progress);
->is_incremental_marking_in_progress);
#endif
DCHECK(header.IsMarked<AccessMode::kAtomic>());
DCHECK(marker);
@@ -61,9 +61,10 @@ void WriteBarrier::DijkstraMarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
const auto* heap = page->heap();
// Marker being not set up means that no incremental/concurrent marking is in
// progress.
if (!heap->marker()) return;
// GetWriteBarrierType() checks marking state.
DCHECK(heap->marker());
// No write barriers should be executed from atomic pause marking.
DCHECK(!heap->in_atomic_pause());
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
@@ -77,10 +78,11 @@ void WriteBarrier::DijkstraMarkingBarrierRangeSlow(
HeapHandle& heap_handle, const void* first_element, size_t element_size,
size_t number_of_elements, TraceCallback trace_callback) {
auto& heap_base = HeapBase::From(heap_handle);
MarkerBase* marker = heap_base.marker();
if (!marker) {
return;
}
// GetWriteBarrierType() checks marking state.
DCHECK(heap_base.marker());
// No write barriers should be executed from atomic pause marking.
DCHECK(!heap_base.in_atomic_pause());
cppgc::subtle::DisallowGarbageCollectionScope disallow_gc_scope(heap_base);
const char* array = static_cast<const char*>(first_element);
@@ -103,9 +105,10 @@ void WriteBarrier::SteeleMarkingBarrierSlow(const void* value) {
const BasePage* page = BasePage::FromPayload(value);
const auto* heap = page->heap();
// Marker being not set up means that no incremental/concurrent marking is in
// progress.
if (!heap->marker()) return;
// GetWriteBarrierType() checks marking state.
DCHECK(heap->marker());
// No write barriers should be executed from atomic pause marking.
DCHECK(!heap->in_atomic_pause());
auto& header =
const_cast<HeapObjectHeader&>(page->ObjectHeaderFromInnerAddress(value));
@@ -120,6 +123,11 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
const AgeTable& age_table,
const void* slot,
uintptr_t value_offset) {
// A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
// of the barrier. This is a result of the order of bailouts where not marking
// results in applying the generational barrier.
if (local_data.heap_base->in_atomic_pause()) return;
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
// Record slot.
@@ -144,6 +152,12 @@ bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(const void* object,
return page->heap()->marker();
}
// static
bool WriteBarrierTypeForNonCagedHeapPolicy::IsMarking(HeapHandle& heap_handle) {
const auto& heap_base = internal::HeapBase::From(heap_handle);
return heap_base.marker();
}
#if defined(CPPGC_CAGED_HEAP)
// static


@@ -294,26 +294,26 @@ class IncrementalMarkingTest : public testing::TestWithHeap {
}
void FinishMarking() {
marker_->FinishMarking(MarkingConfig::StackState::kMayContainHeapPointers);
GetMarkerRef()->FinishMarking(
MarkingConfig::StackState::kMayContainHeapPointers);
// Pretend to finish sweeping as StatsCollector verifies that Notify*
// methods are called in the right order.
GetMarkerRef().reset();
Heap::From(GetHeap())->stats_collector()->NotifySweepingCompleted();
}
void InitializeMarker(HeapBase& heap, cppgc::Platform* platform,
MarkingConfig config) {
marker_ =
GetMarkerRef() =
MarkerFactory::CreateAndStartMarking<Marker>(heap, platform, config);
}
Marker* marker() const { return marker_.get(); }
MarkerBase* marker() const { return GetMarkerRef().get(); }
private:
bool SingleStep(MarkingConfig::StackState stack_state) {
return marker_->IncrementalMarkingStepForTesting(stack_state);
return GetMarkerRef()->IncrementalMarkingStepForTesting(stack_state);
}
std::unique_ptr<Marker> marker_;
};
constexpr IncrementalMarkingTest::MarkingConfig
@@ -348,9 +348,8 @@ TEST_F(IncrementalMarkingTest,
InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(),
IncrementalPreciseMarkingConfig);
root->SetChild(MakeGarbageCollected<GCed>(GetAllocationHandle()));
HeapObjectHeader& header = HeapObjectHeader::FromPayload(root->child());
EXPECT_FALSE(header.IsMarked());
FinishSteps(MarkingConfig::StackState::kNoHeapPointers);
HeapObjectHeader& header = HeapObjectHeader::FromPayload(root->child());
EXPECT_TRUE(header.IsMarked());
FinishMarking();
}


@@ -171,6 +171,77 @@ TEST_F(PrefinalizerTest, PrefinalizerInvocationPreservesOrder) {
namespace {
class LinkedNode final : public GarbageCollected<LinkedNode> {
public:
explicit LinkedNode(LinkedNode* next) : next_(next) {}
void Trace(Visitor* visitor) const { visitor->Trace(next_); }
LinkedNode* next() const { return next_; }
void RemoveNext() {
CHECK(next_);
next_ = next_->next_;
}
private:
Member<LinkedNode> next_;
};
class MutatingPrefinalizer final
: public GarbageCollected<MutatingPrefinalizer> {
CPPGC_USING_PRE_FINALIZER(MutatingPrefinalizer, PreFinalizer);
public:
void PreFinalizer() {
// Pre-finalizers are generally used to mutate the object graph. The API
// does not allow distinguishing between live and dead objects. It is
// generally safe to re-write the dead *or* the live object graph. Adding
// a dead object to the live graph must not happen.
//
// RemoveNext() must not trigger a write barrier. In the case all LinkedNode
// objects die at the same time, the graph is mutated with a dead object.
// This is only safe when the dead object is added to a dead subgraph.
parent_node_->RemoveNext();
}
explicit MutatingPrefinalizer(LinkedNode* parent) : parent_node_(parent) {}
void Trace(Visitor* visitor) const { visitor->Trace(parent_node_); }
private:
Member<LinkedNode> parent_node_;
};
} // namespace
TEST_F(PrefinalizerTest, PrefinalizerCanRewireGraphWithLiveObjects) {
Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
GetAllocationHandle(),
MakeGarbageCollected<LinkedNode>(
GetAllocationHandle(),
MakeGarbageCollected<LinkedNode>(GetAllocationHandle(), nullptr)))};
CHECK(root->next());
MakeGarbageCollected<MutatingPrefinalizer>(GetAllocationHandle(), root.Get());
PreciseGC();
}
TEST_F(PrefinalizerTest, PrefinalizerCanRewireGraphWithDeadObjects) {
Persistent<LinkedNode> root{MakeGarbageCollected<LinkedNode>(
GetAllocationHandle(),
MakeGarbageCollected<LinkedNode>(
GetAllocationHandle(),
MakeGarbageCollected<LinkedNode>(GetAllocationHandle(), nullptr)))};
CHECK(root->next());
MakeGarbageCollected<MutatingPrefinalizer>(GetAllocationHandle(), root.Get());
// All LinkedNode objects will die on the following GC. The pre-finalizer may
// still operate with them but not add them to a live object.
root.Clear();
PreciseGC();
}
namespace {
class AllocatingPrefinalizer : public GarbageCollected<AllocatingPrefinalizer> {
CPPGC_USING_PRE_FINALIZER(AllocatingPrefinalizer, PreFinalizer);


@@ -86,6 +86,10 @@ class TestWithHeap : public TestWithPlatform {
return Heap::From(GetHeap())->marker_;
}
const std::unique_ptr<MarkerBase>& GetMarkerRef() const {
return Heap::From(GetHeap())->marker_;
}
void ResetLinearAllocationBuffers();
private: