[heap] Rework incremental marking scheduling

The new scheduling reduces the main thread marking performed in
tasks and on allocation. It is based on two counters:
- bytes_marked,
- scheduled_bytes_to_mark.

The bytes_marked counter accounts for marking done by both the main
thread and the concurrent threads. The scheduled_bytes_to_mark
counter increases based on allocated bytes and on the time passed
since the start of marking. If bytes_marked is greater than
scheduled_bytes_to_mark, then the main thread steps are allowed to
mark only a minimal amount.
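
Condensed from ComputeStepSizeInBytes in the patch below, the
per-step marking budget is essentially the gap between the two
counters:

  // bytes_marked_ counts main-thread plus concurrent marking;
  // scheduled_bytes_to_mark_ grows with allocation and elapsed time.
  size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
    // Allocation-triggered steps (kV8) may stay up to 1MB behind
    // schedule; this shifts marking work into tasks.
    size_t kScheduleMarginInBytes =
        step_origin == StepOrigin::kV8 ? 1 * MB : 0;
    if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
      return 0;  // Ahead of schedule: mark only the minimal amount.
    return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
  }

For the time-based part, ScheduleBytesToMarkBasedOnTime targets a
total marking wall time of 500ms, so with an initial old generation
of 100MB a 10ms tick schedules 100MB * 10 / 500 = 2MB of marking.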

This also changes how tasks are posted for marking. Previously only
normal tasks were posted. Now a delayed task is posted if the marker
is ahead of schedule.
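
The repost decision, condensed from the patch's
IncrementalMarkingJob::Task::RunInternal (NextTaskType is an
illustrative helper, not part of the patch):

  // kDone means the marker finished its scheduled work and is ahead
  // of schedule, so the next task may wait kDelayInSeconds (10ms).
  IncrementalMarkingJob::TaskType NextTaskType(StepResult result) {
    return result == StepResult::kDone
               ? IncrementalMarkingJob::TaskType::kDelayed
               : IncrementalMarkingJob::TaskType::kNormal;
  }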

Bug: 926189

Change-Id: I5bc9c33a5ecfc9f8d09f78d08ae277d16a2779ca
Reviewed-on: https://chromium-review.googlesource.com/c/1443056
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59433}
Author: Ulan Degenbaev <ulan@chromium.org>
Date: 2019-02-07 11:58:09 +01:00 (committed by Commit Bot)
Parent: 54a1889585
Commit: 4c65986a44
11 changed files with 297 additions and 170 deletions


@@ -328,7 +328,7 @@ void GCTracer::Stop(GarbageCollector collector) {
RecordIncrementalMarkingSpeed(current_.incremental_marking_bytes,
current_.incremental_marking_duration);
recorded_incremental_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
MakeBytesAndDuration(current_.end_object_size, duration));
RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
@@ -340,7 +340,7 @@ void GCTracer::Stop(GarbageCollector collector) {
RecordMutatorUtilization(
current_.end_time, duration + current_.incremental_marking_duration);
recorded_mark_compacts_.Push(
MakeBytesAndDuration(current_.start_object_size, duration));
MakeBytesAndDuration(current_.end_object_size, duration));
RecordGCSumCounters(duration);
ResetIncrementalMarkingCounters();
combined_mark_compact_speed_cache_ = 0.0;
@@ -959,9 +959,15 @@ double GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const {
}
double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
const double kMinimumMarkingSpeed = 0.5;
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
// MarkCompact speed is more stable than incremental marking speed, because
// there might not be many incremental marking steps because of concurrent
// marking.
combined_mark_compact_speed_cache_ = MarkCompactSpeedInBytesPerMillisecond();
if (combined_mark_compact_speed_cache_ > 0)
return combined_mark_compact_speed_cache_;
const double kMinimumMarkingSpeed = 0.5;
double speed1 = IncrementalMarkingSpeedInBytesPerMillisecond();
double speed2 = FinalIncrementalMarkCompactSpeedInBytesPerMillisecond();
if (speed1 < kMinimumMarkingSpeed || speed2 < kMinimumMarkingSpeed) {


@@ -1251,7 +1251,7 @@ void Heap::ReportExternalMemoryPressure() {
// Extend the gc callback flags with external memory flags.
current_gc_callback_flags_ = static_cast<GCCallbackFlags>(
current_gc_callback_flags_ | kGCCallbackFlagsForExternalMemory);
incremental_marking()->AdvanceIncrementalMarking(
incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
}
@@ -3129,14 +3129,11 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
result = true;
break;
case DO_INCREMENTAL_STEP: {
const double remaining_idle_time_in_ms =
incremental_marking()->AdvanceIncrementalMarking(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
if (remaining_idle_time_in_ms > 0.0) {
FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
incremental_marking()->AdvanceWithDeadline(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
result = incremental_marking()->IsStopped();
break;
}


@@ -18,15 +18,16 @@ namespace internal {
class IncrementalMarkingJob::Task : public CancelableTask {
public:
static void Step(Heap* heap,
EmbedderHeapTracer::EmbedderStackState stack_state);
static StepResult Step(Heap* heap,
EmbedderHeapTracer::EmbedderStackState stack_state);
Task(Isolate* isolate, IncrementalMarkingJob* job,
EmbedderHeapTracer::EmbedderStackState stack_state)
EmbedderHeapTracer::EmbedderStackState stack_state, TaskType task_type)
: CancelableTask(isolate),
isolate_(isolate),
job_(job),
stack_state_(stack_state) {}
stack_state_(stack_state),
task_type_(task_type) {}
// CancelableTask overrides.
void RunInternal() override;
@@ -37,6 +38,7 @@ class IncrementalMarkingJob::Task : public CancelableTask {
Isolate* const isolate_;
IncrementalMarkingJob* const job_;
const EmbedderHeapTracer::EmbedderStackState stack_state_;
const TaskType task_type_;
};
void IncrementalMarkingJob::Start(Heap* heap) {
@@ -44,30 +46,38 @@ void IncrementalMarkingJob::Start(Heap* heap) {
ScheduleTask(heap);
}
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
if (!task_pending_ && !heap->IsTearingDown()) {
void IncrementalMarkingJob::ScheduleTask(Heap* heap, TaskType task_type) {
if (!IsTaskPending(task_type) && !heap->IsTearingDown()) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
task_pending_ = true;
SetTaskPending(task_type, true);
auto taskrunner =
V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
if (taskrunner->NonNestableTasksEnabled()) {
taskrunner->PostNonNestableTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty));
if (task_type == TaskType::kNormal) {
if (taskrunner->NonNestableTasksEnabled()) {
taskrunner->PostNonNestableTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kEmpty, task_type));
} else {
taskrunner->PostTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type));
}
} else {
taskrunner->PostTask(base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown));
taskrunner->PostDelayedTask(
base::make_unique<Task>(
heap->isolate(), this,
EmbedderHeapTracer::EmbedderStackState::kUnknown, task_type),
kDelayInSeconds);
}
}
}
void IncrementalMarkingJob::Task::Step(
StepResult IncrementalMarkingJob::Task::Step(
Heap* heap, EmbedderHeapTracer::EmbedderStackState stack_state) {
const int kIncrementalMarkingDelayMs = 1;
double deadline =
heap->MonotonicallyIncreasingTimeInMs() + kIncrementalMarkingDelayMs;
heap->incremental_marking()->AdvanceIncrementalMarking(
StepResult result = heap->incremental_marking()->AdvanceWithDeadline(
deadline, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kTask);
{
@@ -76,6 +86,7 @@ void IncrementalMarkingJob::Task::Step(
heap->FinalizeIncrementalMarkingIfComplete(
GarbageCollectionReason::kFinalizeMarkingViaTask);
}
return result;
}
void IncrementalMarkingJob::Task::RunInternal() {
@@ -95,12 +106,14 @@ void IncrementalMarkingJob::Task::RunInternal() {
// Clear this flag after StartIncrementalMarking call to avoid
// scheduling a new task when starting incremental marking.
job_->task_pending_ = false;
job_->SetTaskPending(task_type_, false);
if (!incremental_marking->IsStopped()) {
Step(heap, stack_state_);
StepResult step_result = Step(heap, stack_state_);
if (!incremental_marking->IsStopped()) {
job_->ScheduleTask(heap);
job_->ScheduleTask(heap, step_result == StepResult::kDone
? TaskType::kDelayed
: TaskType::kNormal);
}
}
}


@@ -18,18 +18,32 @@ class Isolate;
// step and posts another task until the marking is completed.
class IncrementalMarkingJob {
public:
IncrementalMarkingJob() = default;
enum class TaskType { kNormal, kDelayed };
bool TaskPending() const { return task_pending_; }
IncrementalMarkingJob() V8_NOEXCEPT = default;
void Start(Heap* heap);
void ScheduleTask(Heap* heap);
void ScheduleTask(Heap* heap, TaskType task_type = TaskType::kNormal);
private:
class Task;
static constexpr double kDelayInSeconds = 10.0 / 1000.0;
bool task_pending_ = false;
bool IsTaskPending(TaskType task_type) {
return task_type == TaskType::kNormal ? normal_task_pending_
: delayed_task_pending_;
}
void SetTaskPending(TaskType task_type, bool value) {
if (task_type == TaskType::kNormal) {
normal_task_pending_ = value;
} else {
delayed_task_pending_ = value;
}
}
bool normal_task_pending_ = false;
bool delayed_task_pending_ = false;
};
} // namespace internal
} // namespace v8


@@ -39,7 +39,7 @@ void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
RuntimeCallTimerScope runtime_timer(
heap->isolate(),
RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
incremental_marking_.AdvanceOnAllocation();
// AdvanceIncrementalMarkingOnAllocation can start incremental marking.
incremental_marking_.EnsureBlackAllocated(addr, size);
}
@@ -51,7 +51,9 @@ IncrementalMarking::IncrementalMarking(
marking_worklist_(marking_worklist),
weak_objects_(weak_objects),
initial_old_generation_size_(0),
bytes_marked_ahead_of_schedule_(0),
bytes_marked_(0),
scheduled_bytes_to_mark_(0),
schedule_update_time_ms_(0),
bytes_marked_concurrently_(0),
unscanned_bytes_of_large_object_(0),
is_compacting_(false),
@@ -59,7 +61,6 @@ IncrementalMarking::IncrementalMarking(
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
trace_wrappers_toggle_(false),
request_type_(NONE),
new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
@@ -307,8 +308,9 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
bytes_allocated_ = 0;
bytes_marked_ahead_of_schedule_ = 0;
bytes_marked_ = 0;
scheduled_bytes_to_mark_ = 0;
schedule_update_time_ms_ = start_time_ms_;
bytes_marked_concurrently_ = 0;
should_hurry_ = false;
was_activated_ = true;
@@ -703,8 +705,7 @@ void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
size_t dead_bytes_in_new_space) {
if (!IsMarking()) return;
bytes_marked_ahead_of_schedule_ -=
Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
}
bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject obj) {
@@ -787,7 +788,9 @@ intptr_t IncrementalMarking::ProcessMarkingWorklist(
return bytes_processed;
}
void IncrementalMarking::EmbedderStep(double duration_ms) {
StepResult IncrementalMarking::EmbedderStep(double duration_ms) {
if (!ShouldDoEmbedderStep()) return StepResult::kDone;
constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
@@ -813,6 +816,7 @@ void IncrementalMarking::EmbedderStep(double duration_ms) {
} while (!empty_worklist &&
(heap_->MonotonicallyIncreasingTimeInMs() < deadline));
heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
return empty_worklist ? StepResult::kDone : StepResult::kMoreWorkRemaining;
}
void IncrementalMarking::Hurry() {
@@ -927,7 +931,54 @@ bool IncrementalMarking::ShouldDoEmbedderStep() {
heap_->local_embedder_heap_tracer()->InUse();
}
double IncrementalMarking::AdvanceIncrementalMarking(
void IncrementalMarking::FastForwardSchedule() {
if (scheduled_bytes_to_mark_ < bytes_marked_) {
scheduled_bytes_to_mark_ = bytes_marked_;
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Fast-forwarded schedule\n");
}
}
}
void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
// Consider marking close to finalization if 75% of the initial old
// generation was marked.
if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
FastForwardSchedule();
}
}
void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
// Time interval that should be sufficient to complete incremental marking.
constexpr double kTargetMarkingWallTimeInMs = 500;
constexpr double kMinTimeBetweenScheduleInMs = 10;
if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
double delta_ms =
Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
schedule_update_time_ms_ = time_ms;
size_t bytes_to_mark =
(delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
AddScheduledBytesToMark(bytes_to_mark);
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scheduled %" PRIuS
"KB to mark based on time delta %.1fms\n",
bytes_to_mark / KB, delta_ms);
}
}
namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
if (a == StepResult::kDone && b == StepResult::kDone)
return StepResult::kDone;
return StepResult::kMoreWorkRemaining;
}
} // anonymous namespace
StepResult IncrementalMarking::AdvanceWithDeadline(
double deadline_in_ms, CompletionAction completion_action,
StepOrigin step_origin) {
HistogramTimerScope incremental_marking_scope(
@@ -936,26 +987,24 @@ double IncrementalMarking::AdvanceIncrementalMarking(
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
DCHECK(!IsStopped());
ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
FastForwardScheduleIfCloseToFinalization();
double remaining_time_in_ms = 0.0;
StepResult result;
do {
if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
EmbedderStep(kStepSizeInMs);
} else {
const intptr_t step_size_in_bytes =
GCIdleTimeHandler::EstimateMarkingStepSize(
kStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
Step(step_size_in_bytes, completion_action, step_origin);
}
trace_wrappers_toggle_ = !trace_wrappers_toggle_;
StepResult embedder_result = EmbedderStep(kStepSizeInMs / 2);
StepResult v8_result =
V8Step(kStepSizeInMs / 2, completion_action, step_origin);
result = CombineStepResults(v8_result, embedder_result);
remaining_time_in_ms =
deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
} while (remaining_time_in_ms > kStepSizeInMs && !IsComplete() &&
!marking_worklist()->IsEmpty());
return remaining_time_in_ms;
!marking_worklist()->IsEmpty() &&
result == StepResult::kMoreWorkRemaining);
return result;
}
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -974,9 +1023,9 @@ void IncrementalMarking::FinalizeSweeping() {
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
// Update bytes_allocated_ based on the allocation counter.
size_t current_counter = heap_->OldGenerationAllocationCounter();
bytes_allocated_ += current_counter - old_generation_allocation_counter_;
size_t result = current_counter - old_generation_allocation_counter_;
old_generation_allocation_counter_ = current_counter;
return bytes_allocated_;
return result;
}
size_t IncrementalMarking::StepSizeToMakeProgress() {
@@ -994,72 +1043,89 @@ size_t IncrementalMarking::StepSizeToMakeProgress() {
kMaxStepSizeInByte);
}
void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
(state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
return;
}
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double embedder_step_time_ms = 0.0;
if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
double start = heap_->MonotonicallyIncreasingTimeInMs();
EmbedderStep(kMaxStepSizeInMs);
embedder_step_time_ms = heap_->MonotonicallyIncreasingTimeInMs() - start;
}
trace_wrappers_toggle_ = !trace_wrappers_toggle_;
size_t bytes_to_process =
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes &&
embedder_step_time_ms < kMaxStepSizeInMs) {
StepOnAllocation(bytes_to_process,
kMaxStepSizeInMs - embedder_step_time_ms);
void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
// The overflow case.
scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
} else {
scheduled_bytes_to_mark_ += bytes_to_mark;
}
}
void IncrementalMarking::StepOnAllocation(size_t bytes_to_process,
double max_step_size) {
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
max_step_size,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, step_size);
size_t bytes_processed = 0;
void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
size_t progress_bytes = StepSizeToMakeProgress();
size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
size_t bytes_to_mark = progress_bytes + allocation_bytes;
AddScheduledBytesToMark(bytes_to_mark);
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Scheduled %" PRIuS
"KB to mark based on allocation (progress="
"%" PRIuS "KB, allocation=%" PRIuS "KB)\n",
bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
}
}
void IncrementalMarking::FetchBytesMarkedConcurrently() {
if (FLAG_concurrent_marking) {
size_t current_bytes_marked_concurrently =
heap()->concurrent_marking()->TotalMarkedBytes();
// The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
// short period of time when a concurrent marking task is finishing.
if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
bytes_marked_ahead_of_schedule_ +=
bytes_marked_ +=
current_bytes_marked_concurrently - bytes_marked_concurrently_;
bytes_marked_concurrently_ = current_bytes_marked_concurrently;
}
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marked %" PRIuS "KB on background threads\n",
heap_->concurrent_marking()->TotalMarkedBytes() / KB);
}
}
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
// Steps performed in tasks and concurrently have put us ahead of
// schedule. We skip processing of marking dequeue here and thus shift
// marking time from inside V8 to standalone tasks.
bytes_marked_ahead_of_schedule_ -= bytes_to_process;
bytes_processed += bytes_to_process;
bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
}
bytes_processed +=
Step(bytes_to_process, GC_VIA_STACK_GUARD, StepOrigin::kV8);
bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
size_t IncrementalMarking::Step(size_t bytes_to_process,
CompletionAction action,
StepOrigin step_origin) {
size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
FetchBytesMarkedConcurrently();
if (FLAG_trace_incremental_marking) {
if (scheduled_bytes_to_mark_ > bytes_marked_) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marker is %" PRIuS "KB behind schedule\n",
(scheduled_bytes_to_mark_ - bytes_marked_) / KB);
} else {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marker is %" PRIuS "KB ahead of schedule\n",
(bytes_marked_ - scheduled_bytes_to_mark_) / KB);
}
}
// Allow steps on allocation to get behind the schedule by a small amount.
// This gives higher priority to steps in tasks.
size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
return 0;
return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}
void IncrementalMarking::AdvanceOnAllocation() {
// Code using an AlwaysAllocateScope assumes that the GC state does not
// change; that implies that no marking steps must be performed.
if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
(state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
return;
}
HistogramTimerScope incremental_marking_scope(
heap_->isolate()->counters()->gc_incremental_marking());
TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
ScheduleBytesToMarkBasedOnAllocation();
V8Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
StepResult IncrementalMarking::V8Step(double max_step_size_in_ms,
CompletionAction action,
StepOrigin step_origin) {
StepResult result = StepResult::kMoreWorkRemaining;
double start = heap_->MonotonicallyIncreasingTimeInMs();
if (state_ == SWEEPING) {
@@ -1067,7 +1133,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
FinalizeSweeping();
}
size_t bytes_processed = 0;
size_t bytes_processed = 0, bytes_to_process = 0;
if (state_ == MARKING) {
if (FLAG_concurrent_marking) {
heap_->new_space()->ResetOriginalTop();
@@ -1086,18 +1152,34 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
marking_worklist()->Print();
}
#endif
bytes_processed = ProcessMarkingWorklist(bytes_to_process);
if (step_origin == StepOrigin::kTask) {
bytes_marked_ahead_of_schedule_ += bytes_processed;
if (FLAG_trace_incremental_marking) {
heap_->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marking speed %.fKB/ms\n",
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
}
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
max_step_size_in_ms,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
if (bytes_to_process == 0) {
result = StepResult::kDone;
}
bytes_processed =
ProcessMarkingWorklist(Max(bytes_to_process, kMinStepSizeInBytes));
bytes_marked_ += bytes_processed;
if (marking_worklist()->IsEmpty()) {
if (heap_->local_embedder_heap_tracer()
->ShouldFinalizeIncrementalMarking()) {
if (!finalize_marking_completed_) {
FinalizeMarking(action);
FastForwardSchedule();
result = StepResult::kMoreWorkRemaining;
incremental_marking_job()->Start(heap_);
} else {
MarkingComplete(action);
}
@@ -1122,12 +1204,7 @@ size_t IncrementalMarking::Step(size_t bytes_to_process,
step_origin == StepOrigin::kV8 ? "in v8" : "in task",
bytes_processed / KB, bytes_to_process / KB, duration);
}
if (FLAG_trace_concurrent_marking) {
heap_->isolate()->PrintWithTimestamp(
"Concurrently marked %" PRIuS "KB\n",
heap_->concurrent_marking()->TotalMarkedBytes() / KB);
}
return bytes_processed;
return result;
}
} // namespace internal


@ -20,6 +20,7 @@ class Object;
class PagedSpace;
enum class StepOrigin { kV8, kTask };
enum class StepResult { kDone, kMoreWorkRemaining };
class V8_EXPORT_PRIVATE IncrementalMarking {
public:
@@ -70,8 +71,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static const size_t kOldGenerationAllocatedThreshold = 256 * KB;
static const size_t kMinStepSizeInBytes = 64 * KB;
static const int kStepSizeInMs = 1;
static const int kMaxStepSizeInMs = 5;
static constexpr double kStepSizeInMs = 1;
static constexpr double kMaxStepSizeInMs = 5;
#ifndef DEBUG
static const intptr_t kActivationThreshold = 8 * MB;
@@ -164,21 +165,20 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
void Epilogue();
// Performs incremental marking steps until deadline_in_ms is reached. It
// returns the remaining time that cannot be used for incremental marking
// anymore because a single step would exceed the deadline.
double AdvanceIncrementalMarking(double deadline_in_ms,
CompletionAction completion_action,
StepOrigin step_origin);
// Performs incremental marking steps and returns before the deadline_in_ms is
// reached. It may return earlier if the marker is already ahead of the
// marking schedule, which is indicated with StepResult::kDone.
StepResult AdvanceWithDeadline(double deadline_in_ms,
CompletionAction completion_action,
StepOrigin step_origin);
void FinalizeSweeping();
size_t Step(size_t bytes_to_process, CompletionAction action,
StepOrigin step_origin);
void StepOnAllocation(size_t bytes_to_process, double max_step_size);
StepResult V8Step(double max_step_size_in_ms, CompletionAction action,
StepOrigin step_origin);
bool ShouldDoEmbedderStep();
void EmbedderStep(double duration);
StepResult EmbedderStep(double duration);
inline void RestartIfNotMarking();
@@ -290,12 +290,30 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
// Visits the object and returns its size.
V8_INLINE int VisitObject(Map map, HeapObject obj);
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
// Updates scheduled_bytes_to_mark_ to ensure marking progress based on
// time.
void ScheduleBytesToMarkBasedOnTime(double time_ms);
// Updates scheduled_bytes_to_mark_ to ensure marking progress based on
// allocations.
void ScheduleBytesToMarkBasedOnAllocation();
// Helper functions for ScheduleBytesToMarkBasedOnAllocation.
size_t StepSizeToKeepUpWithAllocations();
size_t StepSizeToMakeProgress();
void AddScheduledBytesToMark(size_t bytes_to_mark);
// Schedules more bytes to mark so that the marker is no longer ahead
// of schedule.
void FastForwardSchedule();
void FastForwardScheduleIfCloseToFinalization();
// Fetches marked byte counters from the concurrent marker.
void FetchBytesMarkedConcurrently();
// Returns the bytes to mark in the current step based on the scheduled
// bytes and already marked bytes.
size_t ComputeStepSizeInBytes(StepOrigin step_origin);
void AdvanceOnAllocation();
void SetState(State s) {
state_ = s;
@@ -309,8 +327,9 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
double start_time_ms_;
size_t initial_old_generation_size_;
size_t old_generation_allocation_counter_;
size_t bytes_allocated_;
size_t bytes_marked_ahead_of_schedule_;
size_t bytes_marked_;
size_t scheduled_bytes_to_mark_;
double schedule_update_time_ms_;
// A sample of concurrent_marking()->TotalMarkedBytes() at the last
// incremental marking step. It is used for updating
// bytes_marked_ahead_of_schedule_ with contribution of concurrent marking.
@@ -325,7 +344,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool was_activated_;
bool black_allocation_;
bool finalize_marking_completed_;
bool trace_wrappers_toggle_;
IncrementalMarkingJob incremental_marking_job_;
GCRequestType request_type_;


@@ -86,7 +86,7 @@ void MemoryReducer::NotifyTimer(const Event& event) {
const int kIncrementalMarkingDelayMs = 500;
double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
kIncrementalMarkingDelayMs;
heap()->incremental_marking()->AdvanceIncrementalMarking(
heap()->incremental_marking()->AdvanceWithDeadline(
deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kTask);
heap()->FinalizeIncrementalMarkingIfComplete(


@@ -153,6 +153,7 @@ void SimulateFullSpace(v8::internal::NewSpace* space,
}
void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
const double kStepSizeInMs = 100;
CHECK(FLAG_incremental_marking);
i::IncrementalMarking* marking = heap->incremental_marking();
i::MarkCompactCollector* collector = heap->mark_compact_collector();
@@ -171,8 +172,8 @@ void SimulateIncrementalMarking(i::Heap* heap, bool force_completion) {
if (!force_completion) return;
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
i::StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}


@@ -2269,11 +2269,12 @@ TEST(InstanceOfStubWriteBarrier) {
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
const double kStepSizeInMs = 100;
while (!marking_state->IsBlack(f->code()) && !marking->IsStopped()) {
// Discard any pending GC requests otherwise we will get GC when we enter
// code below.
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
}
CHECK(marking->IsMarking());
@@ -2364,9 +2365,10 @@ TEST(IdleNotificationFinishMarking) {
CHECK_EQ(CcTest::heap()->gc_count(), initial_gc_count);
const double kStepSizeInMs = 100;
do {
marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
} while (
!CcTest::heap()->mark_compact_collector()->marking_worklist()->IsEmpty());
@@ -3577,8 +3579,6 @@ TEST(LargeObjectSlotRecording) {
// Start incremental marking to active write barrier.
heap::SimulateIncrementalMarking(heap, false);
heap->incremental_marking()->AdvanceIncrementalMarking(
10000000, IncrementalMarking::NO_GC_VIA_STACK_GUARD, StepOrigin::kV8);
// Create references from the large object to the object on the evacuation
// candidate.
@@ -3588,6 +3588,8 @@ TEST(LargeObjectSlotRecording) {
CHECK(lo->get(i) == old_location);
}
heap::SimulateIncrementalMarking(heap, true);
// Move the evacuation candidate object.
CcTest::CollectAllGarbage();
@@ -3641,9 +3643,7 @@ TEST(IncrementalMarkingStepMakesBigProgressWithLargeObjects) {
CcTest::heap()->StartIncrementalMarking(
i::Heap::kNoGCFlags, i::GarbageCollectionReason::kTesting);
}
// This big step should be sufficient to mark the whole array.
marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
heap::SimulateIncrementalMarking(CcTest::heap());
CHECK(marking->IsComplete() ||
marking->IsReadyToOverApproximateWeakClosure());
}
@@ -4808,12 +4808,7 @@ TEST(Regress3631) {
Handle<JSReceiver> obj =
v8::Utils::OpenHandle(*v8::Local<v8::Object>::Cast(result));
Handle<JSWeakCollection> weak_map(JSWeakCollection::cast(*obj), isolate);
HeapObject weak_map_table = HeapObject::cast(weak_map->table());
IncrementalMarking::MarkingState* marking_state = marking->marking_state();
while (!marking_state->IsBlack(weak_map_table) && !marking->IsStopped()) {
marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
}
SimulateIncrementalMarking(heap);
// Stash the backing store in a handle.
Handle<Object> save(weak_map->table(), isolate);
// The following line will update the backing store.
@@ -5391,9 +5386,11 @@ TEST(Regress598319) {
// Now we search for a state where we are in incremental marking and have
// only partially marked the large object.
const double kSmallStepSizeInMs = 0.1;
while (!marking->IsComplete()) {
marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kSmallStepSizeInMs,
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
CHECK_NE(page->progress_bar(), arr.get()->Size());
{
@@ -5409,9 +5406,11 @@ TEST(Regress598319) {
}
// Finish marking with bigger steps to speed up test.
const double kLargeStepSizeInMs = 1000;
while (!marking->IsComplete()) {
marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kLargeStepSizeInMs,
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5491,9 +5490,10 @@ TEST(Regress615489) {
v8::HandleScope inner(CcTest::isolate());
isolate->factory()->NewFixedArray(500, TENURED)->Size();
}
const double kStepSizeInMs = 100;
while (!marking->IsComplete()) {
marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5550,10 +5550,11 @@ TEST(Regress631969) {
CcTest::CollectGarbage(NEW_SPACE);
// Finish incremental marking.
const double kStepSizeInMs = 100;
IncrementalMarking* marking = heap->incremental_marking();
while (!marking->IsComplete()) {
marking->Step(MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
marking->V8Step(kStepSizeInMs, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
if (marking->IsReadyToOverApproximateWeakClosure()) {
marking->FinalizeIncrementally();
}
@@ -5969,7 +5970,7 @@ HEAP_TEST(Regress670675) {
}
if (marking->IsStopped()) break;
double deadline = heap->MonotonicallyIncreasingTimeInMs() + 1;
marking->AdvanceIncrementalMarking(
marking->AdvanceWithDeadline(
deadline, IncrementalMarking::GC_VIA_STACK_GUARD, StepOrigin::kV8);
}
DCHECK(marking->IsStopped());


@@ -71,7 +71,7 @@ class MockPlatform : public TestPlatform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
UNREACHABLE();
task_ = std::move(task);
};
void PostIdleTask(std::unique_ptr<IdleTask> task) override {


@@ -59,7 +59,7 @@ class MockPlatform final : public TestPlatform {
void PostDelayedTask(std::unique_ptr<Task> task,
double delay_in_seconds) override {
UNREACHABLE();
tasks_.push(std::move(task));
};
void PostIdleTask(std::unique_ptr<IdleTask> task) override {