cppgc: Move incremental step on allocation to allocation observer

In addition, trigger the observer only every ~256KiB to avoid
excessive incremental marking steps on fragmented heaps that have to
set up LABs repeatedly.

Bug: v8:12285
Change-Id: Id3d85d2c3f96d9d914c731f998df827898e1863d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3208810
Commit-Queue: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Omer Katz <omerkatz@chromium.org>
Cr-Commit-Position: refs/heads/main@{#77278}
This commit is contained in:
Michael Lippautz 2021-10-07 10:56:59 +02:00 committed by V8 LUCI CQ
parent e10345065d
commit 6123c57182
7 changed files with 55 additions and 16 deletions

View File

@ -13,7 +13,6 @@
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-statistics-collector.h"
#include "src/heap/cppgc/heap-visitor.h"
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
@ -97,10 +96,6 @@ size_t HeapBase::ObjectPayloadSize() const {
return ObjectSizeCounter().GetSize(const_cast<RawHeap&>(raw_heap()));
}
// Makes progress on an ongoing incremental GC when the allocator requests
// it. A no-op when no marker is installed (i.e. no GC is in progress).
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
  if (!marker_) return;
  marker_->AdvanceMarkingOnAllocation();
}
size_t HeapBase::ExecutePreFinalizers() {
#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
// Allocations in pre finalizers should not trigger another GC.

View File

@ -172,8 +172,6 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
return override_stack_state_.get();
}
void AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
// Termination drops all roots (clears them out) and runs garbage collections
// in a bounded fixed point loop until no new objects are created in
// destructors. Exceeding the loop bound results in a crash.

View File

@ -4,12 +4,14 @@
#include "src/heap/cppgc/marker.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/globals.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/heap-visitor.h"
@ -201,6 +203,27 @@ MarkerBase::~MarkerBase() {
marking_worklists_.weak_containers_worklist()->Clear();
}
// Drives incremental marking from the allocation path. Instead of stepping
// on every LAB refill, allocation deltas are accumulated and a marking step
// is only triggered once at least kMinAllocatedBytesPerStep bytes have been
// allocated since the previous step.
class MarkerBase::IncrementalMarkingAllocationObserver final
    : public StatsCollector::AllocationObserver {
 public:
  static constexpr size_t kMinAllocatedBytesPerStep = 256 * kKB;

  explicit IncrementalMarkingAllocationObserver(MarkerBase& marker)
      : marker_(marker) {}

  void AllocatedObjectSizeIncreased(size_t delta) final {
    bytes_since_last_step_ += delta;
    if (bytes_since_last_step_ <= kMinAllocatedBytesPerStep) return;
    marker_.AdvanceMarkingOnAllocation();
    // Restart accumulation from zero after a step.
    bytes_since_last_step_ = 0;
  }

 private:
  MarkerBase& marker_;
  size_t bytes_since_last_step_ = 0;
};
void MarkerBase::StartMarking() {
DCHECK(!is_marking_);
StatsCollector::EnabledScope stats_scope(
@ -227,6 +250,10 @@ void MarkerBase::StartMarking() {
mutator_marking_state_.Publish();
concurrent_marker_->Start();
}
incremental_marking_allocation_observer_ =
std::make_unique<IncrementalMarkingAllocationObserver>(*this);
heap().stats_collector()->RegisterObserver(
incremental_marking_allocation_observer_.get());
}
}
@ -240,6 +267,9 @@ void MarkerBase::EnterAtomicPause(MarkingConfig::StackState stack_state) {
// Cancel remaining concurrent/incremental tasks.
concurrent_marker_->Cancel();
incremental_marking_handle_.Cancel();
heap().stats_collector()->UnregisterObserver(
incremental_marking_allocation_observer_.get());
incremental_marking_allocation_observer_.reset();
}
config_.stack_state = stack_state;
config_.marking_type = MarkingConfig::MarkingType::kAtomic;

View File

@ -83,9 +83,6 @@ class V8_EXPORT_PRIVATE MarkerBase {
v8::base::TimeDelta = kMaximumIncrementalStepDuration,
size_t marked_bytes_limit = 0);
// Makes marking progress when allocating a new LAB.
void AdvanceMarkingOnAllocation();
// Signals leaving the atomic marking pause. This method expects no more
// objects to be marked and merely updates marking states if needed.
void LeaveAtomicPause();
@ -141,6 +138,8 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool IsMarking() const { return is_marking_; }
protected:
class IncrementalMarkingAllocationObserver;
static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration =
v8::base::TimeDelta::FromMilliseconds(2);
@ -172,12 +171,16 @@ class V8_EXPORT_PRIVATE MarkerBase {
bool IncrementalMarkingStep(MarkingConfig::StackState);
void AdvanceMarkingOnAllocation();
HeapBase& heap_;
MarkingConfig config_ = MarkingConfig::Default();
cppgc::Platform* platform_;
std::shared_ptr<cppgc::TaskRunner> foreground_task_runner_;
IncrementalMarkingTask::Handle incremental_marking_handle_;
std::unique_ptr<IncrementalMarkingAllocationObserver>
incremental_marking_allocation_observer_;
MarkingWorklists marking_worklists_;
MutatorMarkingState mutator_marking_state_;

View File

@ -118,7 +118,6 @@ void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
stats_collector_.NotifySafePointForConservativeCollection();
raw_heap_.heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
// Objects allocated during pre finalizers should be allocated as black
// since marking is already done. Atomics are not needed because there is

View File

@ -34,7 +34,8 @@ void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
auto it = std::find(allocation_observers_.begin(),
                    allocation_observers_.end(), observer);
DCHECK_NE(allocation_observers_.end(), it);
// Do not erase here: ForAllAllocationObservers may currently be iterating
// the vector and erasing would invalidate its indices. Null out the slot
// and let the iteration compact the vector once it is done.
*it = nullptr;
allocation_observer_deleted_ = true;
}
void StatsCollector::NotifyAllocation(size_t bytes) {

View File

@ -218,8 +218,9 @@ class V8_EXPORT_PRIVATE StatsCollector final {
using DisabledConcurrentScope = InternalScope<kDisabled, kConcurrentThread>;
using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>;
// Observer for allocated object size. May be used to implement heap growing
// heuristics.
// Observer for allocated object size. May e.g. be used to implement heap
// growing heuristics. Observers may register/unregister observers at any
// time when being invoked.
class AllocationObserver {
public:
// Called after observing at least
@ -346,6 +347,7 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// vector to allow fast iteration of observers. Register/Unregisters only
// happens on startup/teardown.
std::vector<AllocationObserver*> allocation_observers_;
bool allocation_observer_deleted_ = false;
GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;
@ -363,8 +365,19 @@ class V8_EXPORT_PRIVATE StatsCollector final {
template <typename Callback>
void StatsCollector::ForAllAllocationObservers(Callback callback) {
for (AllocationObserver* observer : allocation_observers_) {
callback(observer);
// Iterate using indices to allow push_back() of new observers.
for (size_t i = 0; i < allocation_observers_.size(); ++i) {
auto* observer = allocation_observers_[i];
if (observer) {
callback(observer);
}
}
if (allocation_observer_deleted_) {
allocation_observers_.erase(
std::remove(allocation_observers_.begin(), allocation_observers_.end(),
nullptr),
allocation_observers_.end());
allocation_observer_deleted_ = false;
}
}