Revert "cppgc: Parallel marking in atomic pause"

This reverts commit 6747144c82.

Reason for revert: https://ci.chromium.org/ui/p/v8/builders/ci/Linux%20V8%20FYI%20Release%20(NVIDIA)/17679/test-results

Original change's description:
> cppgc: Parallel marking in atomic pause
>
> Bug: v8:12424
> Change-Id: I0633e1bd8c890c14ce2c5519253a5e6eb7592f04
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3295580
> Commit-Queue: Omer Katz <omerkatz@chromium.org>
> Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#78081}

Bug: v8:12424
Change-Id: I8cd6ad8bb72906329bf820a8c1df06e8fa8d89a2
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3301469
Owners-Override: Nico Hartmann <nicohartmann@chromium.org>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Auto-Submit: Nico Hartmann <nicohartmann@chromium.org>
Commit-Queue: Michael Achenbach <machenbach@chromium.org>
Cr-Commit-Position: refs/heads/main@{#78089}
Authored by Nico Hartmann on 2021-11-25 12:59:21 +00:00; committed by V8 LUCI CQ
parent aa58053d3a
commit 81b3178742
10 changed files with 20 additions and 60 deletions

@@ -331,9 +331,7 @@ CppHeap::CppHeap(
     : cppgc::internal::HeapBase(
           std::make_shared<CppgcPlatformAdapter>(platform), custom_spaces,
           cppgc::internal::HeapBase::StackSupport::
-              kSupportsConservativeStackScan,
-          cppgc::internal::HeapBase::MarkingType::kIncrementalAndConcurrent,
-          cppgc::internal::HeapBase::SweepingType::kIncrementalAndConcurrent),
+              kSupportsConservativeStackScan),
       wrapper_descriptor_(wrapper_descriptor) {
   CHECK_NE(WrapperDescriptor::kUnknownEmbedderId,
            wrapper_descriptor_.embedder_id_for_garbage_collected);

@@ -53,8 +53,7 @@ class ObjectSizeCounter : private HeapVisitor<ObjectSizeCounter> {
 HeapBase::HeapBase(
     std::shared_ptr<cppgc::Platform> platform,
     const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
-    StackSupport stack_support, MarkingType marking_support,
-    SweepingType sweeping_support)
+    StackSupport stack_support)
     : raw_heap_(this, custom_spaces),
       platform_(std::move(platform)),
       oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
@@ -82,9 +81,7 @@ HeapBase::HeapBase(
       weak_persistent_region_(*oom_handler_.get()),
       strong_cross_thread_persistent_region_(*oom_handler_.get()),
       weak_cross_thread_persistent_region_(*oom_handler_.get()),
-      stack_support_(stack_support),
-      marking_support_(marking_support),
-      sweeping_support_(sweeping_support) {
+      stack_support_(stack_support) {
   stats_collector_->RegisterObserver(
       &allocation_observer_for_PROCESS_HEAP_STATISTICS_);
 }

@@ -71,8 +71,6 @@ class StatsCollector;
 class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
  public:
   using StackSupport = cppgc::Heap::StackSupport;
-  using MarkingType = cppgc::Heap::MarkingType;
-  using SweepingType = cppgc::Heap::SweepingType;
 
   static HeapBase& From(cppgc::HeapHandle& heap_handle) {
     return static_cast<HeapBase&>(heap_handle);
@@ -83,8 +81,7 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   HeapBase(std::shared_ptr<cppgc::Platform> platform,
            const std::vector<std::unique_ptr<CustomSpaceBase>>& custom_spaces,
-           StackSupport stack_support, MarkingType marking_support,
-           SweepingType sweeping_support);
+           StackSupport stack_support);
   virtual ~HeapBase();
 
   HeapBase(const HeapBase&) = delete;
@@ -206,8 +203,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   int GetCreationThreadId() const { return creation_thread_id_; }
 
-  MarkingType marking_support() const { return marking_support_; }
-
  protected:
   // Used by the incremental scheduler to finalize a GC if supported.
   virtual void FinalizeIncrementalGarbageCollectionIfNeeded(
@@ -278,9 +273,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
   int creation_thread_id_ = v8::base::OS::GetCurrentThreadId();
 
-  const MarkingType marking_support_;
-  const SweepingType sweeping_support_;
-
   friend class MarkerBase::IncrementalMarkingTask;
   friend class cppgc::subtle::DisallowGarbageCollectionScope;
   friend class cppgc::subtle::NoGarbageCollectionScope;

@@ -78,12 +78,13 @@ void CheckConfig(Heap::Config config, Heap::MarkingType marking_support,
 Heap::Heap(std::shared_ptr<cppgc::Platform> platform,
            cppgc::Heap::HeapOptions options)
-    : HeapBase(platform, options.custom_spaces, options.stack_support,
-               options.marking_support, options.sweeping_support),
+    : HeapBase(platform, options.custom_spaces, options.stack_support),
       gc_invoker_(this, platform_.get(), options.stack_support),
       growing_(&gc_invoker_, stats_collector_.get(),
                options.resource_constraints, options.marking_support,
-               options.sweeping_support) {
+               options.sweeping_support),
+      marking_support_(options.marking_support),
+      sweeping_support_(options.sweeping_support) {
   CHECK_IMPLIES(options.marking_support != MarkingType::kAtomic,
                 platform_->GetForegroundTaskRunner());
   CHECK_IMPLIES(options.sweeping_support != SweepingType::kAtomic,

@@ -53,6 +53,9 @@ class V8_EXPORT_PRIVATE Heap final : public HeapBase,
   GCInvoker gc_invoker_;
   HeapGrowing growing_;
+  const MarkingType marking_support_;
+  const SweepingType sweeping_support_;
+
   size_t epoch_ = 0;
 };

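Taken together, the heap hunks above move the marking/sweeping configuration back out of the shared base class: after the revert, HeapBase (and therefore CppHeap) is constructed with stack support only, while cppgc::internal::Heap once again owns marking_support_ and sweeping_support_ and takes them from HeapOptions. Below is a minimal sketch of the restored ownership, with enum values trimmed to the ones visible in this diff; these are simplified stand-ins, not the actual cppgc class definitions.

    // Minimal sketch, not the real cppgc classes; enum values limited to the
    // ones that appear in this diff.
    enum class StackSupport { kSupportsConservativeStackScan };
    enum class MarkingType { kAtomic, kIncrementalAndConcurrent };
    enum class SweepingType { kAtomic, kIncrementalAndConcurrent };

    class HeapBaseSketch {
     public:
      // Post-revert: the shared base is configured with stack support only.
      explicit HeapBaseSketch(StackSupport stack_support)
          : stack_support_(stack_support) {}

     protected:
      const StackSupport stack_support_;
    };

    class HeapSketch final : public HeapBaseSketch {
     public:
      HeapSketch(StackSupport stack_support, MarkingType marking_support,
                 SweepingType sweeping_support)
          : HeapBaseSketch(stack_support),
            // Post-revert: the concrete heap owns the marking/sweeping knobs.
            marking_support_(marking_support),
            sweeping_support_(sweeping_support) {}

      MarkingType marking_support() const { return marking_support_; }

     private:
      const MarkingType marking_support_;
      const SweepingType sweeping_support_;
    };

One consequence, visible in the heap-base.h hunks, is that HeapBase no longer exposes a marking_support() accessor, so MarkerBase can no longer query the heap's marking mode through the base class; that is why the marker.cc hunks further down change as well.
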
@@ -30,6 +30,7 @@ void IncrementalMarkingSchedule::UpdateMutatorThreadMarkedBytes(
 void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
     size_t marked_bytes) {
+  DCHECK(!incremental_marking_start_time_.IsNull());
   concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
 }

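Restoring the DCHECK above re-establishes the old contract of the marking schedule: concurrent markers only report bytes after an incremental-marking start time has been recorded (the reverted change presumably had to drop it because its concurrent jobs could also run during a purely atomic pause). A small illustrative stand-in for that contract, not the real IncrementalMarkingSchedule API:

    #include <atomic>
    #include <cassert>
    #include <chrono>
    #include <cstddef>

    // Illustrative stand-in: reporting concurrent bytes requires that the
    // marking cycle has been started first, mirroring the restored DCHECK.
    class MarkingScheduleSketch {
     public:
      void NotifyIncrementalMarkingStart() {
        start_time_ = std::chrono::steady_clock::now();
      }

      void AddConcurrentlyMarkedBytes(std::size_t marked_bytes) {
        // A start time must already be recorded.
        assert(start_time_ != std::chrono::steady_clock::time_point{});
        // Relaxed is enough here: the counter only feeds scheduling heuristics.
        concurrently_marked_bytes_.fetch_add(marked_bytes,
                                             std::memory_order_relaxed);
      }

      std::size_t concurrently_marked_bytes() const {
        return concurrently_marked_bytes_.load(std::memory_order_relaxed);
      }

     private:
      std::chrono::steady_clock::time_point start_time_{};
      std::atomic<std::size_t> concurrently_marked_bytes_{0};
    };
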
@@ -240,7 +240,6 @@ void MarkerBase::StartMarking() {
           MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
     mutator_marking_state_.Publish();
     concurrent_marker_->Start();
-    concurrent_marking_active_ = true;
   }
   incremental_marking_allocation_observer_ =
       std::make_unique<IncrementalMarkingAllocationObserver>(*this);
@@ -256,9 +255,8 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
                                            StatsCollector::kMarkAtomicPrologue);
   if (ExitIncrementalMarkingIfNeeded(config_, heap())) {
-    // Cancel remaining incremental tasks. Concurrent marking jobs are left to
-    // run in parallel with the atomic pause until the mutator thread runs out
-    // of work.
+    // Cancel remaining concurrent/incremental tasks.
+    concurrent_marker_->Cancel();
     incremental_marking_handle_.Cancel();
     heap().stats_collector()->UnregisterObserver(
         incremental_marking_allocation_observer_.get());
@@ -278,17 +276,6 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
       MarkNotFullyConstructedObjects();
     }
   }
-  if (heap().marking_support() ==
-      MarkingConfig::MarkingType::kIncrementalAndConcurrent) {
-    // Start parallel marking.
-    mutator_marking_state_.Publish();
-    if (concurrent_marking_active_) {
-      concurrent_marker_->NotifyIncrementalMutatorStepCompleted();
-    } else {
-      concurrent_marker_->Start();
-      concurrent_marking_active_ = true;
-    }
-  }
 }
 
 void MarkerBase::LeaveAtomicPause() {
@@ -427,16 +414,6 @@ void MarkerBase::AdvanceMarkingOnAllocation() {
   }
 }
 
-bool MarkerBase::CancelConcurrentMarkingIfNeeded() {
-  if (config_.marking_type != MarkingConfig::MarkingType::kAtomic ||
-      !concurrent_marking_active_)
-    return false;
-
-  concurrent_marker_->Cancel();
-  concurrent_marking_active_ = false;
-  return true;
-}
-
 bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
                                           size_t marked_bytes_limit) {
   bool is_done = false;
@@ -456,9 +433,6 @@ bool MarkerBase::AdvanceMarkingWithLimits(v8::base::TimeDelta max_duration,
       // adjustment.
       is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
     }
-    if (is_done && CancelConcurrentMarkingIfNeeded()) {
-      is_done = ProcessWorklistsWithDeadline(marked_bytes_limit, deadline);
-    }
     schedule_.UpdateMutatorThreadMarkedBytes(
         mutator_marking_state_.marked_bytes());
   }

@@ -173,8 +173,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
   void AdvanceMarkingOnAllocation();
 
-  bool CancelConcurrentMarkingIfNeeded();
-
   HeapBase& heap_;
   MarkingConfig config_ = MarkingConfig::Default();
@@ -191,7 +189,6 @@
   IncrementalMarkingSchedule schedule_;
   std::unique_ptr<ConcurrentMarkerBase> concurrent_marker_{nullptr};
-  bool concurrent_marking_active_ = false;
 
   bool main_marking_disabled_for_testing_{false};
   bool visited_cross_thread_persistents_in_atomic_pause_{false};

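For orientation, the marker.cc/marker.h hunks above undo the core of the original change: with parallel marking, EnterAtomicPause kept the concurrent marking job running into the pause (starting it if needed) instead of cancelling it, and AdvanceMarkingWithLimits cancelled it only once the mutator had drained its worklists, then processed the worklists once more to pick up work the concurrent markers had published. A rough sketch of that now-reverted control flow, using simplified stand-ins rather than the real cppgc marker API:

    #include <cstddef>

    // Stand-in for the concurrent marking job (ConcurrentMarkerBase in cppgc).
    class ConcurrentMarkerSketch {
     public:
      void Start() {}
      void NotifyIncrementalMutatorStepCompleted() {}
      void Cancel() {}
    };

    class MarkerSketch {
     public:
      void EnterAtomicPause() {
        // Reverted behavior: instead of cancelling the concurrent marker here,
        // keep it running in parallel with the atomic pause (the real code also
        // publishes the mutator's local worklists first).
        if (concurrent_marking_active_) {
          concurrent_marker_.NotifyIncrementalMutatorStepCompleted();
        } else {
          concurrent_marker_.Start();
          concurrent_marking_active_ = true;
        }
      }

      bool AdvanceMarkingWithLimits(std::size_t marked_bytes_limit) {
        bool is_done = ProcessWorklistsWithDeadline(marked_bytes_limit);
        if (is_done && CancelConcurrentMarkingIfNeeded()) {
          // Concurrent jobs may have published more work while we marked;
          // drain the worklists once more on the mutator thread.
          is_done = ProcessWorklistsWithDeadline(marked_bytes_limit);
        }
        return is_done;
      }

     private:
      bool CancelConcurrentMarkingIfNeeded() {
        if (!concurrent_marking_active_) return false;
        concurrent_marker_.Cancel();
        concurrent_marking_active_ = false;
        return true;
      }

      bool ProcessWorklistsWithDeadline(std::size_t /*marked_bytes_limit*/) {
        return true;  // placeholder: "drained everything within the limit"
      }

      ConcurrentMarkerSketch concurrent_marker_;
      bool concurrent_marking_active_ = false;
    };

After this revert, EnterAtomicPause simply cancels the concurrent marker again, and AdvanceMarkingWithLimits runs without the second drain.
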
@@ -415,17 +415,15 @@ void MutatorMarkingState::InvokeWeakRootsCallbackIfNeeded(
 #if DEBUG
   const HeapObjectHeader& header =
       HeapObjectHeader::FromObject(desc.base_object_payload);
-  DCHECK_IMPLIES(header.IsInConstruction(),
-                 header.IsMarked<AccessMode::kAtomic>());
+  DCHECK_IMPLIES(header.IsInConstruction(), header.IsMarked());
 #endif  // DEBUG
   weak_callback(LivenessBrokerFactory::Create(), parameter);
 }
 
 bool MutatorMarkingState::IsMarkedWeakContainer(HeapObjectHeader& header) {
-  const bool result =
-      weak_containers_worklist_.Contains<AccessMode::kAtomic>(&header) &&
-      !recently_retraced_weak_containers_.Contains(&header);
-  DCHECK_IMPLIES(result, header.IsMarked<AccessMode::kAtomic>());
+  const bool result = weak_containers_worklist_.Contains(&header) &&
+                      !recently_retraced_weak_containers_.Contains(&header);
+  DCHECK_IMPLIES(result, header.IsMarked());
   DCHECK_IMPLIES(result, !header.IsInConstruction());
   return result;
 }
@@ -495,7 +493,7 @@ template <AccessMode mode>
 void DynamicallyTraceMarkedObject(Visitor& visitor,
                                   const HeapObjectHeader& header) {
   DCHECK(!header.IsInConstruction<mode>());
-  DCHECK(header.IsMarked<AccessMode::kAtomic>());
+  DCHECK(header.IsMarked<mode>());
   header.Trace<mode>(&visitor);
 }

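The marking-state hunks drop the explicit AccessMode::kAtomic template arguments again: once concurrent markers no longer run during the atomic pause, only the mutator thread touches these mark bits and weak-container worklists there, so the code can go back to plain reads. A small, portable illustration of the access-mode pattern; this is a simplified stand-in, not cppgc's HeapObjectHeader, and it uses C++20 std::atomic_ref:

    #include <atomic>
    #include <cstdint>

    enum class AccessMode : std::uint8_t { kNonAtomic, kAtomic };

    // Simplified mark bit with both access flavors. The atomic flavor is needed
    // whenever another thread (a concurrent marker) may read or set the bit at
    // the same time; the non-atomic flavor is valid only when the mutator is
    // the sole accessor, which holds again once concurrent marking is cancelled
    // before the atomic pause.
    class MarkBitSketch {
     public:
      template <AccessMode mode = AccessMode::kNonAtomic>
      bool IsMarked() {
        if constexpr (mode == AccessMode::kAtomic) {
          return std::atomic_ref<std::uint8_t>(bit_).load(
                     std::memory_order_relaxed) != 0;
        } else {
          return bit_ != 0;  // plain read
        }
      }

      template <AccessMode mode = AccessMode::kNonAtomic>
      void Mark() {
        if constexpr (mode == AccessMode::kAtomic) {
          std::atomic_ref<std::uint8_t>(bit_).store(1,
                                                    std::memory_order_relaxed);
        } else {
          bit_ = 1;  // plain write
        }
      }

     private:
      std::uint8_t bit_ = 0;
    };

DynamicallyTraceMarkedObject correspondingly goes back to honoring its mode template parameter instead of forcing the atomic flavor.
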
@@ -4,7 +4,6 @@
 #include "src/heap/cppgc/marking-visitor.h"
 
-#include "src/heap/cppgc/globals.h"
 #include "src/heap/cppgc/heap.h"
 #include "src/heap/cppgc/marking-state.h"
@@ -55,7 +54,7 @@ ConservativeMarkingVisitor::ConservativeMarkingVisitor(
 void ConservativeMarkingVisitor::VisitFullyConstructedConservatively(
     HeapObjectHeader& header) {
-  if (header.IsMarked<AccessMode::kAtomic>()) {
+  if (header.IsMarked()) {
     if (marking_state_.IsMarkedWeakContainer(header))
       marking_state_.ReTraceMarkedWeakContainer(visitor_, header);
     return;