From f13c55d7b2297d3aac9f482b0fdfbb6b0c484b75 Mon Sep 17 00:00:00 2001
From: Omer Katz
Date: Wed, 26 Aug 2020 01:17:11 +0200
Subject: [PATCH] cppgc: Port incremental marking schedule

The schedule is simpler than the one in Blink since it now returns
deadlines based on marked bytes instead of time. If marking is ahead of
schedule, the minimum step size is returned. Otherwise, the step size is
set to catch up to the schedule (ignoring the time passed while
performing the step).

There is no longer a default initial step size (Blink needed one since
the marking speed was unknown).

If the estimated schedule is exceeded (i.e. marking takes longer than
500ms), each step tries to mark all remaining objects but is still
capped by the maximum step duration of 2ms.

Bug: chromium:1056170
Change-Id: I09857db161c621a12d064f9c8c21b646c34f9d71
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2375200
Commit-Queue: Omer Katz
Reviewed-by: Ulan Degenbaev
Reviewed-by: Anton Bikineev
Cr-Commit-Position: refs/heads/master@{#69602}
---
 BUILD.gn | 5 +-
 src/heap/cppgc-js/cpp-heap.cc | 3 +-
 .../cppgc/incremental-marking-schedule.cc | 74 +++++++++++++++
 src/heap/cppgc/incremental-marking-schedule.h | 53 +++++++++++
 src/heap/cppgc/marker.cc | 52 +++++-----
 src/heap/cppgc/marker.h | 17 +++-
 test/unittests/BUILD.gn | 1 +
 .../incremental-marking-schedule-unittest.cc | 95 +++++++++++++++++++
 test/unittests/heap/cppgc/marker-unittest.cc | 14 ++-
 9 files changed, 280 insertions(+), 34 deletions(-)
 create mode 100644 src/heap/cppgc/incremental-marking-schedule.cc
 create mode 100644 src/heap/cppgc/incremental-marking-schedule.h
 create mode 100644 test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc

diff --git a/BUILD.gn b/BUILD.gn
index c759f09d19..d123dd7918 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -376,7 +376,7 @@ v8_toolset_for_shell = "host"
 #
 config("internal_config_base") {
-  # Only targets in this file and its subdirs can depend on this.
+  # Only targets in this file and its subdirs can depend on this.
   visibility = [ "./*" ]

   configs = [ ":v8_tracing_config" ]
@@ -390,6 +390,7 @@ config("internal_config_base") {
 config("internal_config") {
   defines = []
+  # Only targets in this file and its subdirs can depend on this.
   visibility = [ "./*" ]

@@ -4304,6 +4305,8 @@ v8_source_set("cppgc_base") {
     "src/heap/cppgc/heap-visitor.h",
     "src/heap/cppgc/heap.cc",
     "src/heap/cppgc/heap.h",
+    "src/heap/cppgc/incremental-marking-schedule.cc",
+    "src/heap/cppgc/incremental-marking-schedule.h",
     "src/heap/cppgc/liveness-broker.cc",
     "src/heap/cppgc/liveness-broker.h",
     "src/heap/cppgc/logging.cc",
diff --git a/src/heap/cppgc-js/cpp-heap.cc b/src/heap/cppgc-js/cpp-heap.cc
index bd689baf34..dabc2291a1 100644
--- a/src/heap/cppgc-js/cpp-heap.cc
+++ b/src/heap/cppgc-js/cpp-heap.cc
@@ -137,8 +137,7 @@ void CppHeap::TracePrologue(TraceFlags flags) {
 bool CppHeap::AdvanceTracing(double deadline_in_ms) {
   // TODO(chromium:1056170): Replace std::numeric_limits<size_t>::max() with a
   // proper deadline when unified heap transitions to bytes-based deadline.
-  marking_done_ = marker_->AdvanceMarkingWithDeadline(
-      std::numeric_limits<size_t>::max(),
+  marking_done_ = marker_->AdvanceMarkingWithMaxDuration(
       v8::base::TimeDelta::FromMillisecondsD(deadline_in_ms));
   return marking_done_;
 }
diff --git a/src/heap/cppgc/incremental-marking-schedule.cc b/src/heap/cppgc/incremental-marking-schedule.cc
new file mode 100644
index 0000000000..7e1ff951ab
--- /dev/null
+++ b/src/heap/cppgc/incremental-marking-schedule.cc
@@ -0,0 +1,74 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/incremental-marking-schedule.h"
+
+#include <cmath>
+
+#include "src/heap/cppgc/globals.h"
+
+namespace cppgc {
+namespace internal {
+
+const double IncrementalMarkingSchedule::kEstimatedMarkingTimeMs = 500.0;
+const size_t IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep =
+    64 * kKB;
+
+void IncrementalMarkingSchedule::NotifyIncrementalMarkingStart() {
+  DCHECK(incremental_marking_start_time_.IsNull());
+  incremental_marking_start_time_ = v8::base::TimeTicks::Now();
+}
+
+void IncrementalMarkingSchedule::UpdateIncrementalMarkedBytes(
+    size_t overall_marked_bytes) {
+  DCHECK(!incremental_marking_start_time_.IsNull());
+  incrementally_marked_bytes_ = overall_marked_bytes;
+}
+
+void IncrementalMarkingSchedule::AddConcurrentlyMarkedBytes(
+    size_t marked_bytes) {
+  DCHECK(!incremental_marking_start_time_.IsNull());
+  concurrently_marked_bytes_.fetch_add(marked_bytes, std::memory_order_relaxed);
+}
+
+size_t IncrementalMarkingSchedule::GetOverallMarkedBytes() {
+  return incrementally_marked_bytes_ +
+         concurrently_marked_bytes_.load(std::memory_order_relaxed);
+}
+
+double IncrementalMarkingSchedule::GetElapsedTimeInMs(
+    v8::base::TimeTicks start_time) {
+  if (elapsed_time_for_testing_ != kNoSetElapsedTimeForTesting) {
+    double elapsed_time = elapsed_time_for_testing_;
+    elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+    return elapsed_time;
+  }
+  return (v8::base::TimeTicks::Now() - start_time).InMillisecondsF();
+}
+
+size_t IncrementalMarkingSchedule::GetNextIncrementalStepDuration(
+    size_t estimated_live_bytes) {
+  DCHECK(!incremental_marking_start_time_.IsNull());
+  double elapsed_time_in_ms =
+      GetElapsedTimeInMs(incremental_marking_start_time_);
+  size_t actual_marked_bytes = GetOverallMarkedBytes();
+  size_t expected_marked_bytes = std::ceil(
+      estimated_live_bytes * elapsed_time_in_ms / kEstimatedMarkingTimeMs);
+  if (expected_marked_bytes < actual_marked_bytes) {
+    // Marking is ahead of schedule, incremental marking should do the minimum.
+    return kMinimumMarkedBytesPerIncrementalStep;
+  }
+  // Assuming marking will take |kEstimatedMarkingTime|, overall there will
+  // be |estimated_live_bytes| live bytes to mark, and that marking speed is
+  // constant, after |elapsed_time| the number of marked_bytes should be
+  // |estimated_live_bytes| * (|elapsed_time| / |kEstimatedMarkingTime|),
+  // denoted as |expected_marked_bytes|. If |actual_marked_bytes| is less,
+  // i.e. marking is behind schedule, incremental marking should help "catch
+  // up" by marking (|expected_marked_bytes| - |actual_marked_bytes|).
+  return std::max(kMinimumMarkedBytesPerIncrementalStep,
+                  expected_marked_bytes - actual_marked_bytes);
+}
+
+}  // namespace internal
+}  // namespace cppgc
diff --git a/src/heap/cppgc/incremental-marking-schedule.h b/src/heap/cppgc/incremental-marking-schedule.h
new file mode 100644
index 0000000000..3c8a9e1a01
--- /dev/null
+++ b/src/heap/cppgc/incremental-marking-schedule.h
@@ -0,0 +1,53 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_INCREMENTAL_MARKING_SCHEDULE_H_
+#define V8_HEAP_CPPGC_INCREMENTAL_MARKING_SCHEDULE_H_
+
+#include <atomic>
+
+#include "src/base/platform/time.h"
+
+namespace cppgc {
+namespace internal {
+
+class V8_EXPORT_PRIVATE IncrementalMarkingSchedule {
+ public:
+  // Estimated duration of GC cycle in milliseconds.
+  static const double kEstimatedMarkingTimeMs;
+
+  // Minimum number of bytes that should be marked during an incremental
+  // marking step.
+  static const size_t kMinimumMarkedBytesPerIncrementalStep;
+
+  void NotifyIncrementalMarkingStart();
+
+  void UpdateIncrementalMarkedBytes(size_t);
+  void AddConcurrentlyMarkedBytes(size_t);
+
+  size_t GetOverallMarkedBytes();
+
+  size_t GetNextIncrementalStepDuration(size_t);
+
+  void SetElapsedTimeForTesting(double elapsed_time) {
+    elapsed_time_for_testing_ = elapsed_time;
+  }
+
+ private:
+  double GetElapsedTimeInMs(v8::base::TimeTicks);
+
+  v8::base::TimeTicks incremental_marking_start_time_;
+
+  size_t incrementally_marked_bytes_ = 0;
+  std::atomic_size_t concurrently_marked_bytes_{0};
+
+  // Using -1 as a sentinel to denote that no elapsed time was set for testing.
+  static constexpr double kNoSetElapsedTimeForTesting = -1;
+  double elapsed_time_for_testing_ = kNoSetElapsedTimeForTesting;
+};
+
+}  // namespace internal
+}  // namespace cppgc
+
+#endif  // V8_HEAP_CPPGC_INCREMENTAL_MARKING_SCHEDULE_H_
diff --git a/src/heap/cppgc/marker.cc b/src/heap/cppgc/marker.cc
index 7e287c2b1a..7c3c9c3830 100644
--- a/src/heap/cppgc/marker.cc
+++ b/src/heap/cppgc/marker.cc
@@ -128,6 +128,12 @@ void TraceMarkedObject(Visitor* visitor, const HeapObjectHeader* header) {
   gcinfo.trace(visitor, header->Payload());
 }
+size_t GetNextIncrementalStepDuration(IncrementalMarkingSchedule& schedule,
+                                      HeapBase& heap) {
+  return schedule.GetNextIncrementalStepDuration(
+      heap.stats_collector()->allocated_object_size());
+}
+
 }  // namespace
 constexpr v8::base::TimeDelta MarkerBase::kMaximumIncrementalStepDuration;
@@ -148,11 +154,8 @@ MarkerBase::IncrementalMarkingTask::Post(v8::TaskRunner* runner,
 void MarkerBase::IncrementalMarkingTask::Run() {
   if (handle_.IsCanceled()) return;
-  // TODO(chromium:1056170): Replace hardcoded expected marked bytes with
-  // schedule.
   if (marker_->IncrementalMarkingStep(
-          MarkingConfig::StackState::kNoHeapPointers,
-          kMinimumMarkedBytesPerIncrementalStep)) {
+          MarkingConfig::StackState::kNoHeapPointers)) {
     // Incremental marking is done so should finalize GC.
     marker_->heap().FinalizeIncrementalGarbageCollectionIfNeeded(
         MarkingConfig::StackState::kNoHeapPointers);
@@ -197,6 +200,7 @@ void MarkerBase::StartMarking() {
   is_marking_started_ = true;
   if (EnterIncrementalMarkingIfNeeded(config_, heap())) {
     // Performing incremental or concurrent marking.
+    schedule_.NotifyIncrementalMarkingStart();
     // Scanning the stack is expensive so we only do it at the atomic pause.
     VisitRoots(MarkingConfig::StackState::kNoHeapPointers);
     ScheduleIncrementalMarkingTask();
@@ -224,7 +228,8 @@ void MarkerBase::LeaveAtomicPause() {
   DCHECK(!incremental_marking_handle_);
   ResetRememberedSet(heap());
   heap().stats_collector()->NotifyMarkingCompleted(
-      mutator_marking_state_.marked_bytes());
+      // GetOverallMarkedBytes also includes concurrently marked bytes.
+ schedule_.GetOverallMarkedBytes()); } void MarkerBase::FinishMarking(MarkingConfig::StackState stack_state) { @@ -274,24 +279,21 @@ void MarkerBase::ScheduleIncrementalMarkingTask() { } bool MarkerBase::IncrementalMarkingStepForTesting( - MarkingConfig::StackState stack_state, size_t expected_marked_bytes) { - return IncrementalMarkingStep(stack_state, expected_marked_bytes); + MarkingConfig::StackState stack_state) { + return IncrementalMarkingStep(stack_state); } -bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state, - size_t expected_marked_bytes) { +bool MarkerBase::IncrementalMarkingStep(MarkingConfig::StackState stack_state) { if (stack_state == MarkingConfig::StackState::kNoHeapPointers) { marking_worklists_.FlushNotFullyConstructedObjects(); } config_.stack_state = stack_state; - return AdvanceMarkingWithDeadline(expected_marked_bytes); + return AdvanceMarkingWithDeadline(); } bool MarkerBase::AdvanceMarkingOnAllocation() { - // Replace with schedule based deadline. - bool is_done = - AdvanceMarkingWithDeadline(kMinimumMarkedBytesPerIncrementalStep); + bool is_done = AdvanceMarkingWithDeadline(); if (is_done) { // Schedule another incremental task for finalizing without a stack. ScheduleIncrementalMarkingTask(); @@ -299,10 +301,15 @@ bool MarkerBase::AdvanceMarkingOnAllocation() { return is_done; } -bool MarkerBase::AdvanceMarkingWithDeadline(size_t expected_marked_bytes, - v8::base::TimeDelta max_duration) { - bool is_done = - ProcessWorklistsWithDeadline(expected_marked_bytes, max_duration); +bool MarkerBase::AdvanceMarkingWithMaxDuration( + v8::base::TimeDelta max_duration) { + return AdvanceMarkingWithDeadline(max_duration); +} + +bool MarkerBase::AdvanceMarkingWithDeadline(v8::base::TimeDelta max_duration) { + size_t step_size_in_bytes = GetNextIncrementalStepDuration(schedule_, heap_); + bool is_done = ProcessWorklistsWithDeadline(step_size_in_bytes, max_duration); + schedule_.UpdateIncrementalMarkedBytes(mutator_marking_state_.marked_bytes()); if (!is_done) { // If marking is atomic, |is_done| should always be true. DCHECK_NE(MarkingConfig::MarkingType::kAtomic, config_.marking_type); @@ -316,7 +323,6 @@ bool MarkerBase::ProcessWorklistsWithDeadline( size_t marked_bytes_deadline = mutator_marking_state_.marked_bytes() + expected_marked_bytes; v8::base::TimeTicks time_deadline = v8::base::TimeTicks::Now() + max_duration; - do { // Convert |previously_not_fully_constructed_worklist_| to // |marking_worklist_|. 
This merely re-adds items with the proper @@ -328,8 +334,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline( TraceMarkedObject(&visitor(), header); mutator_marking_state_.AccountMarkedBytes(*header); }, - MarkingWorklists::kMutatorThreadId)) + MarkingWorklists::kMutatorThreadId)) { return false; + } if (!DrainWorklistWithBytesAndTimeDeadline( mutator_marking_state_, marked_bytes_deadline, time_deadline, @@ -344,8 +351,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline( item.callback(&visitor(), item.base_object_payload); mutator_marking_state_.AccountMarkedBytes(header); }, - MarkingWorklists::kMutatorThreadId)) + MarkingWorklists::kMutatorThreadId)) { return false; + } if (!DrainWorklistWithBytesAndTimeDeadline( mutator_marking_state_, marked_bytes_deadline, time_deadline, @@ -354,11 +362,11 @@ bool MarkerBase::ProcessWorklistsWithDeadline( TraceMarkedObject(&visitor(), header); mutator_marking_state_.AccountMarkedBytes(*header); }, - MarkingWorklists::kMutatorThreadId)) + MarkingWorklists::kMutatorThreadId)) { return false; + } } while (!marking_worklists_.marking_worklist()->IsLocalViewEmpty( MarkingWorklists::kMutatorThreadId)); - return true; } diff --git a/src/heap/cppgc/marker.h b/src/heap/cppgc/marker.h index 2919bc189f..3630632382 100644 --- a/src/heap/cppgc/marker.h +++ b/src/heap/cppgc/marker.h @@ -12,6 +12,7 @@ #include "src/base/macros.h" #include "src/base/platform/time.h" #include "src/heap/cppgc/globals.h" +#include "src/heap/cppgc/incremental-marking-schedule.h" #include "src/heap/cppgc/marking-state.h" #include "src/heap/cppgc/marking-visitor.h" #include "src/heap/cppgc/marking-worklists.h" @@ -69,8 +70,7 @@ class V8_EXPORT_PRIVATE MarkerBase { // Makes marking progress. // TODO(chromium:1056170): Remove TimeDelta argument when unified heap no // longer uses it. - bool AdvanceMarkingWithDeadline( - size_t, v8::base::TimeDelta = kMaximumIncrementalStepDuration); + bool AdvanceMarkingWithMaxDuration(v8::base::TimeDelta); // Makes marking progress when allocation a new lab. bool AdvanceMarkingOnAllocation(); @@ -97,7 +97,7 @@ class V8_EXPORT_PRIVATE MarkerBase { cppgc::Visitor& VisitorForTesting() { return visitor(); } void ClearAllWorklistsForTesting(); - bool IncrementalMarkingStepForTesting(MarkingConfig::StackState, size_t); + bool IncrementalMarkingStepForTesting(MarkingConfig::StackState); class IncrementalMarkingTask final : public v8::Task { public: @@ -118,7 +118,6 @@ class V8_EXPORT_PRIVATE MarkerBase { protected: static constexpr v8::base::TimeDelta kMaximumIncrementalStepDuration = v8::base::TimeDelta::FromMilliseconds(2); - static constexpr size_t kMinimumMarkedBytesPerIncrementalStep = 64 * kKB; class Key { private: @@ -136,6 +135,12 @@ class V8_EXPORT_PRIVATE MarkerBase { virtual ConservativeTracingVisitor& conservative_visitor() = 0; virtual heap::base::StackVisitor& stack_visitor() = 0; + // Makes marking progress. + // TODO(chromium:1056170): Remove TimeDelta argument when unified heap no + // longer uses it. 
+ bool AdvanceMarkingWithDeadline( + v8::base::TimeDelta = kMaximumIncrementalStepDuration); + bool ProcessWorklistsWithDeadline(size_t, v8::base::TimeDelta); void VisitRoots(MarkingConfig::StackState); @@ -144,7 +149,7 @@ class V8_EXPORT_PRIVATE MarkerBase { void ScheduleIncrementalMarkingTask(); - bool IncrementalMarkingStep(MarkingConfig::StackState, size_t); + bool IncrementalMarkingStep(MarkingConfig::StackState); HeapBase& heap_; MarkingConfig config_ = MarkingConfig::Default(); @@ -157,6 +162,8 @@ class V8_EXPORT_PRIVATE MarkerBase { MarkingState mutator_marking_state_; bool is_marking_started_ = false; + IncrementalMarkingSchedule schedule_; + friend class MarkerFactory; }; diff --git a/test/unittests/BUILD.gn b/test/unittests/BUILD.gn index facc6f25b4..1cd387f5a4 100644 --- a/test/unittests/BUILD.gn +++ b/test/unittests/BUILD.gn @@ -56,6 +56,7 @@ v8_source_set("cppgc_unittests_sources") { "heap/cppgc/heap-object-header-unittest.cc", "heap/cppgc/heap-page-unittest.cc", "heap/cppgc/heap-unittest.cc", + "heap/cppgc/incremental-marking-schedule-unittest.cc", "heap/cppgc/logging-unittest.cc", "heap/cppgc/marker-unittest.cc", "heap/cppgc/marking-verifier-unittest.cc", diff --git a/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc b/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc new file mode 100644 index 0000000000..e91870be9d --- /dev/null +++ b/test/unittests/heap/cppgc/incremental-marking-schedule-unittest.cc @@ -0,0 +1,95 @@ +// Copyright 2020 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "src/heap/cppgc/incremental-marking-schedule.h" + +#include "src/base/platform/time.h" +#include "src/heap/cppgc/globals.h" +#include "src/heap/cppgc/stats-collector.h" +#include "testing/gtest/include/gtest/gtest.h" + +namespace cppgc { +namespace internal { + +namespace { + +class IncrementalMarkingScheduleTest : public testing::Test { + public: + static const size_t kObjectSize; +}; + +const size_t IncrementalMarkingScheduleTest::kObjectSize = + 100 * IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep; + +} // namespace + +TEST_F(IncrementalMarkingScheduleTest, FirstStepReturnsDefaultDuration) { + IncrementalMarkingSchedule schedule; + schedule.NotifyIncrementalMarkingStart(); + schedule.SetElapsedTimeForTesting(0); + EXPECT_EQ(IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep, + schedule.GetNextIncrementalStepDuration(kObjectSize)); +} + +// If marking is not behind schedule and very small time passed between steps +// the oracle should return the minimum step duration. +TEST_F(IncrementalMarkingScheduleTest, NoTimePassedReturnsMinimumDuration) { + IncrementalMarkingSchedule schedule; + schedule.NotifyIncrementalMarkingStart(); + // Add incrementally marked bytes to tell oracle this is not the first step. + schedule.UpdateIncrementalMarkedBytes( + IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep); + schedule.SetElapsedTimeForTesting(0); + EXPECT_EQ(IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep, + schedule.GetNextIncrementalStepDuration(kObjectSize)); +} + +TEST_F(IncrementalMarkingScheduleTest, OracleDoesntExccedMaximumStepDuration) { + IncrementalMarkingSchedule schedule; + schedule.NotifyIncrementalMarkingStart(); + // Add incrementally marked bytes to tell oracle this is not the first step. 
+ static constexpr size_t kMarkedBytes = 1; + schedule.UpdateIncrementalMarkedBytes(kMarkedBytes); + schedule.SetElapsedTimeForTesting( + IncrementalMarkingSchedule::kEstimatedMarkingTimeMs); + EXPECT_EQ(kObjectSize - kMarkedBytes, + schedule.GetNextIncrementalStepDuration(kObjectSize)); +} + +TEST_F(IncrementalMarkingScheduleTest, AheadOfScheduleReturnsMinimumDuration) { + IncrementalMarkingSchedule schedule; + schedule.NotifyIncrementalMarkingStart(); + // Add incrementally marked bytes to tell oracle this is not the first step. + schedule.UpdateIncrementalMarkedBytes( + IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep); + schedule.AddConcurrentlyMarkedBytes(0.6 * kObjectSize); + schedule.SetElapsedTimeForTesting( + 0.5 * IncrementalMarkingSchedule::kEstimatedMarkingTimeMs); + EXPECT_EQ(IncrementalMarkingSchedule::kMinimumMarkedBytesPerIncrementalStep, + schedule.GetNextIncrementalStepDuration(kObjectSize)); +} + +TEST_F(IncrementalMarkingScheduleTest, BehindScheduleReturnsCorrectDuration) { + IncrementalMarkingSchedule schedule; + schedule.NotifyIncrementalMarkingStart(); + schedule.UpdateIncrementalMarkedBytes(0.1 * kObjectSize); + schedule.AddConcurrentlyMarkedBytes(0.25 * kObjectSize); + schedule.SetElapsedTimeForTesting( + 0.5 * IncrementalMarkingSchedule::kEstimatedMarkingTimeMs); + EXPECT_EQ(0.15 * kObjectSize, + schedule.GetNextIncrementalStepDuration(kObjectSize)); + schedule.AddConcurrentlyMarkedBytes(0.05 * kObjectSize); + schedule.SetElapsedTimeForTesting( + 0.5 * IncrementalMarkingSchedule::kEstimatedMarkingTimeMs); + EXPECT_EQ(0.1 * kObjectSize, + schedule.GetNextIncrementalStepDuration(kObjectSize)); + schedule.AddConcurrentlyMarkedBytes(0.05 * kObjectSize); + schedule.SetElapsedTimeForTesting( + 0.5 * IncrementalMarkingSchedule::kEstimatedMarkingTimeMs); + EXPECT_EQ(0.05 * kObjectSize, + schedule.GetNextIncrementalStepDuration(kObjectSize)); +} + +} // namespace internal +} // namespace cppgc diff --git a/test/unittests/heap/cppgc/marker-unittest.cc b/test/unittests/heap/cppgc/marker-unittest.cc index ebae8c6513..b879d9b989 100644 --- a/test/unittests/heap/cppgc/marker-unittest.cc +++ b/test/unittests/heap/cppgc/marker-unittest.cc @@ -285,7 +285,7 @@ class IncrementalMarkingTest : public testing::TestWithHeap { MarkingConfig::MarkingType::kIncremental}; void FinishSteps(MarkingConfig::StackState stack_state) { - SingleStep(stack_state, std::numeric_limits::max()); + while (!SingleStep(stack_state)) {} } void FinishMarking() { @@ -305,9 +305,8 @@ class IncrementalMarkingTest : public testing::TestWithHeap { Marker* marker() const { return marker_.get(); } private: - bool SingleStep(MarkingConfig::StackState stack_state, size_t bytes_to_mark) { - return marker_->IncrementalMarkingStepForTesting(stack_state, - bytes_to_mark); + bool SingleStep(MarkingConfig::StackState stack_state) { + return marker_->IncrementalMarkingStepForTesting(stack_state); } std::unique_ptr marker_; @@ -380,5 +379,12 @@ TEST_F(IncrementalMarkingTest, IncrementalStepDuringAllocation) { FinishMarking(); } +TEST_F(IncrementalMarkingTest, MarkingRunsOutOfWorkEventually) { + InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), + IncrementalPreciseMarkingConfig); + FinishSteps(MarkingConfig::StackState::kNoHeapPointers); + FinishMarking(); +} + } // namespace internal } // namespace cppgc
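
For reference, the catch-up computation that the commit message describes (and that
IncrementalMarkingSchedule::GetNextIncrementalStepDuration implements) can be read in
isolation from the TimeTicks plumbing. The sketch below is illustrative only: the free
function, its parameters, and the sample numbers are not part of the patch; the two
constants mirror the values the patch defines.

#include <algorithm>
#include <cmath>
#include <cstddef>

// Constants as defined by the patch.
constexpr double kEstimatedMarkingTimeMs = 500.0;
constexpr size_t kMinimumMarkedBytesPerIncrementalStep = 64 * 1024;

// Returns how many bytes the next incremental step should mark, given the
// estimated live bytes, the time elapsed since marking started, and the
// bytes already marked (incrementally plus concurrently).
size_t NextIncrementalStepDuration(size_t estimated_live_bytes,
                                   double elapsed_time_in_ms,
                                   size_t actual_marked_bytes) {
  const size_t expected_marked_bytes = static_cast<size_t>(std::ceil(
      estimated_live_bytes * elapsed_time_in_ms / kEstimatedMarkingTimeMs));
  if (expected_marked_bytes < actual_marked_bytes) {
    // Ahead of schedule: only the minimum amount of work is requested.
    return kMinimumMarkedBytesPerIncrementalStep;
  }
  // Behind (or exactly on) schedule: request enough bytes to catch up, but
  // never less than the minimum step size. Once the elapsed time exceeds the
  // 500ms estimate, expected_marked_bytes >= estimated_live_bytes, so the
  // step requests at least all remaining bytes; the 2ms maximum step
  // duration still bounds how long the step actually runs.
  return std::max(kMinimumMarkedBytesPerIncrementalStep,
                  expected_marked_bytes - actual_marked_bytes);
}

For example, with estimated_live_bytes = 1 MB, 250ms elapsed (half of the 500ms
estimate) and 0.4 MB already marked, the expected share is 0.5 MB, so the step
requests roughly 0.1 MB; once marking is ahead of schedule the step falls back
to the 64 KB minimum.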