[heap] New heuristics for incremental marking step size.

This patch simplifies the code for speeding up marking and removes the
write barrier counter. The step size is now computed in two parts:
- bytes to mark in order to keep up with allocation,
- bytes to mark in order to make progress.

BUG=chromium:616434, chromium:646139, chromium:644819
LOG=NO

Review-Url: https://codereview.chromium.org/2359903002
Cr-Commit-Position: refs/heads/master@{#39827}
parent 4c2fd5cd5f
commit 1beb89f24c
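The heuristic described in the commit message can be summarized in the following sketch. The function and constant names (StepSizeToKeepUpWithAllocations, StepSizeToMakeProgress, kAllocatedThreshold) mirror those introduced by the patch, but MarkerState and the free functions are simplified stand-ins rather than the real Heap and IncrementalMarking classes, so treat this as an illustration of the scheduling logic only.

// A minimal sketch of the two-part step-size heuristic, assuming a
// simplified MarkerState in place of the real IncrementalMarking fields.
#include <algorithm>
#include <cstddef>

namespace sketch {

// Constants taken from the patch (see the incremental-marking hunks below).
constexpr std::size_t kAllocatedThreshold = 64 * 1024;  // 64 KB
constexpr double kRampUpIntervalMs = 300;
constexpr std::size_t kTargetStepCount = 128;

struct MarkerState {
  std::size_t old_generation_allocation_counter = 0;  // monotonically increasing
  std::size_t bytes_allocated_since_last_step = 0;
  std::size_t initial_old_generation_size = 0;  // snapshot taken at Start()
  double start_time_ms = 0;                     // snapshot taken at Start()
};

// Part 1: mark at least as many bytes as were allocated in the old
// generation since the last step, so marking keeps up with allocation.
std::size_t StepSizeToKeepUpWithAllocations(MarkerState* s,
                                            std::size_t current_counter) {
  s->bytes_allocated_since_last_step +=
      current_counter - s->old_generation_allocation_counter;
  s->old_generation_allocation_counter = current_counter;
  return s->bytes_allocated_since_last_step;
}

// Part 2: mark a fixed fraction of the initial old generation size, ramped
// up over the first 300 ms so that standalone marking tasks get a chance
// to do the work first.
std::size_t StepSizeToMakeProgress(const MarkerState& s, double now_ms) {
  const std::size_t step_size =
      std::max(s.initial_old_generation_size / kTargetStepCount,
               kAllocatedThreshold);
  const double factor =
      std::min((now_ms - s.start_time_ms) / kRampUpIntervalMs, 1.0);
  return static_cast<std::size_t>(factor * step_size);
}

// Each allocation-triggered step then processes the sum of both parts.
std::size_t BytesToProcess(MarkerState* s, std::size_t current_counter,
                           double now_ms) {
  return StepSizeToKeepUpWithAllocations(s, current_counter) +
         StepSizeToMakeProgress(*s, now_ms);
}

}  // namespace sketch

The actual patch additionally caps this sum at GCIdleTimeHandler::EstimateMarkingStepSize(kMaxStepSizeInMs, ...) and lets work done ahead of schedule by marking tasks count against it, as the incremental-marking hunks below show.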
@@ -3238,16 +3238,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
  Label need_incremental;
  Label need_incremental_pop_scratch;

  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
  __ ldr(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
  __ str(regs_.scratch1(),
         MemOperand(regs_.scratch0(),
                    MemoryChunk::kWriteBarrierCounterOffset));
  __ b(mi, &need_incremental);

  // Let's look at the color of the object: If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

@@ -3155,16 +3155,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
  Label need_incremental;
  Label need_incremental_pop_scratch;

  Register mem_chunk = regs_.scratch0();
  Register counter = regs_.scratch1();
  __ Bic(mem_chunk, regs_.object(), Page::kPageAlignmentMask);
  __ Ldr(counter,
         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ Subs(counter, counter, 1);
  __ Str(counter,
         MemOperand(mem_chunk, MemoryChunk::kWriteBarrierCounterOffset));
  __ B(mi, &need_incremental);

  // If the object is not black we don't have to inform the incremental marker.
  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

@@ -115,7 +115,6 @@ Heap::Heap()
      inline_allocation_disabled_(false),
      total_regexp_code_generated_(0),
      tracer_(nullptr),
      high_survival_rate_period_length_(0),
      promoted_objects_size_(0),
      promotion_ratio_(0),
      semi_space_copied_object_size_(0),
@@ -141,7 +140,7 @@ Heap::Heap()
      full_codegen_bytes_generated_(0),
      crankshaft_codegen_bytes_generated_(0),
      new_space_allocation_counter_(0),
      old_generation_allocation_counter_(0),
      old_generation_allocation_counter_at_last_gc_(0),
      old_generation_size_at_last_gc_(0),
      gcs_since_last_deopt_(0),
      global_pretenuring_feedback_(nullptr),
@@ -1269,11 +1268,6 @@ void Heap::UpdateSurvivalStatistics(int start_new_space_size) {

  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
  tracer()->AddSurvivalRatio(survival_rate);
  if (survival_rate > kYoungSurvivalRateHighThreshold) {
    high_survival_rate_period_length_++;
  } else {
    high_survival_rate_period_length_ = 0;
  }
}

bool Heap::PerformGarbageCollection(
@@ -1310,13 +1304,6 @@ bool Heap::PerformGarbageCollection(

  int start_new_space_size = static_cast<int>(Heap::new_space()->Size());

  if (IsHighSurvivalRate()) {
    // We speed up the incremental marker if it is running so that it
    // does not fall behind the rate of promotion, which would cause a
    // constantly growing old space.
    incremental_marking()->NotifyOfHighPromotionRate();
  }

  {
    Heap::PretenuringScope pretenuring_scope(this);

@@ -1327,7 +1314,7 @@ bool Heap::PerformGarbageCollection(
    old_generation_size_configured_ = true;
    // This should be updated before PostGarbageCollectionProcessing, which
    // can cause another GC. Take into account the objects promoted during GC.
    old_generation_allocation_counter_ +=
    old_generation_allocation_counter_at_last_gc_ +=
        static_cast<size_t>(promoted_objects_size_);
    old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
  } else {

@@ -1387,16 +1387,18 @@ class Heap {
  }

  void UpdateOldGenerationAllocationCounter() {
    old_generation_allocation_counter_ = OldGenerationAllocationCounter();
    old_generation_allocation_counter_at_last_gc_ =
        OldGenerationAllocationCounter();
  }

  size_t OldGenerationAllocationCounter() {
    return old_generation_allocation_counter_ + PromotedSinceLastGC();
    return old_generation_allocation_counter_at_last_gc_ +
           PromotedSinceLastGC();
  }

  // This should be used only for testing.
  void set_old_generation_allocation_counter(size_t new_value) {
    old_generation_allocation_counter_ = new_value;
  void set_old_generation_allocation_counter_at_last_gc(size_t new_value) {
    old_generation_allocation_counter_at_last_gc_ = new_value;
  }

  size_t PromotedSinceLastGC() {
@@ -1723,10 +1725,6 @@ class Heap {
  // Flush the number to string cache.
  void FlushNumberStringCache();

  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
  // Re-visit incremental marking heuristics.
  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }

  void ConfigureInitialOldGenerationSize();

  bool HasLowYoungGenerationAllocationRate();
@@ -2201,7 +2199,6 @@ class Heap {

  GCTracer* tracer_;

  int high_survival_rate_period_length_;
  intptr_t promoted_objects_size_;
  double promotion_ratio_;
  double promotion_rate_;
@@ -2260,7 +2257,7 @@ class Heap {
  // This counter is increased before each GC and never reset. To
  // account for the bytes allocated since the last GC, use the
  // OldGenerationAllocationCounter() function.
  size_t old_generation_allocation_counter_;
  size_t old_generation_allocation_counter_at_last_gc_;

  // The size of objects in old generation after the last MarkCompact GC.
  size_t old_generation_size_at_last_gc_;

@@ -21,26 +21,20 @@ namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
      is_compacting_(false),
      steps_count_(0),
      old_generation_space_available_at_start_of_incremental_(0),
      old_generation_space_used_at_start_of_incremental_(0),
      bytes_rescanned_(0),
      should_hurry_(false),
      marking_speed_(0),
      bytes_scanned_(0),
      allocated_(0),
      write_barriers_invoked_since_last_step_(0),
      initial_old_generation_size_(0),
      bytes_marked_ahead_of_schedule_(0),
      idle_marking_delay_counter_(0),
      unscanned_bytes_of_large_object_(0),
      idle_marking_delay_counter_(0),
      incremental_marking_finalization_rounds_(0),
      is_compacting_(false),
      should_hurry_(false),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      incremental_marking_finalization_rounds_(0),
      request_type_(NONE) {}
      request_type_(NONE),
      new_generation_observer_(*this, kAllocatedThreshold),
      old_generation_observer_(*this, kAllocatedThreshold) {}

bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -71,19 +65,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();

  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  int counter = chunk->write_barrier_counter();
  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
    marking->write_barriers_invoked_since_last_step_ +=
        MemoryChunk::kWriteBarrierCounterGranularity -
        chunk->write_barrier_counter();
    chunk->set_write_barrier_counter(
        MemoryChunk::kWriteBarrierCounterGranularity);
  }

  marking->RecordWrite(obj, slot, *slot);
  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}

// static
@@ -462,21 +444,6 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
}


void IncrementalMarking::NotifyOfHighPromotionRate() {
  if (IsMarking()) {
    if (marking_speed_ < kFastMarking) {
      if (FLAG_trace_gc) {
        heap()->isolate()->PrintWithTimestamp(
            "Increasing marking speed to %d "
            "due to high promotion rate\n",
            static_cast<int>(kFastMarking));
      }
      marking_speed_ = kFastMarking;
    }
  }
}


static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -523,9 +490,14 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  ResetStepCounters();
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ = 0;
  bytes_marked_ahead_of_schedule_ = 0;
  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -538,7 +510,15 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
    state_ = SWEEPING;
  }

  heap_->new_space()->AddAllocationObserver(&observer_);
  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->AddAllocationObserver(&new_generation_observer_);
    } else {
      space->AddAllocationObserver(&old_generation_observer_);
    }
  }

  incremental_marking_job()->Start(heap_);
}
@@ -1002,9 +982,17 @@ void IncrementalMarking::Stop() {
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  heap_->new_space()->RemoveAllocationObserver(&observer_);
  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  IncrementalMarking::set_should_hurry(false);
  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
@@ -1082,82 +1070,6 @@ double IncrementalMarking::AdvanceIncrementalMarking(
}


void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking after %d steps\n",
          static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking because of low space left\n");
    speed_up = true;
  }

  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking because of heap size "
          "increase\n");
    }
  }

  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking because marker was not "
          "keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Postponing speeding up marking until marking "
            "starts\n");
      }
    } else {
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Marking speed increased to %d\n",
            marking_speed_);
      }
    }
  }
}

void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -1166,59 +1078,80 @@ void IncrementalMarking::FinalizeSweeping() {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    bytes_scanned_ = 0;
    StartMarking();
  }
}

void IncrementalMarking::NotifyAllocatedBytes(intptr_t allocated_bytes) {
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Update bytes_allocated_ based on the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return bytes_allocated_;
}

size_t IncrementalMarking::StepSizeToMakeProgress() {
  // We increase step size gradually based on the time passed in order to
  // leave marking work to standalone tasks. The ramp up duration and the
  // target step count are chosen based on benchmarks.
  const int kRampUpIntervalMs = 300;
  const size_t kTargetStepCount = 128;
  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
                         IncrementalMarking::kAllocatedThreshold);
  double time_passed_ms =
      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
  return static_cast<size_t>(factor * step_size);
}

void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  allocated_ += allocated_bytes;
  size_t bytes_to_process =
      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();

  if (allocated_ >= kAllocatedThreshold ||
      write_barriers_invoked_since_last_step_ >=
          kWriteBarriersInvokedThreshold) {
    // The marking speed is driven either by the allocation rate or by the rate
    // at which we are having to check the color of objects in the write
    // barrier.
    // It is possible for a tight non-allocating loop to run a lot of write
    // barriers before we get here and check them (marking can only take place
    // on
    // allocation), so to reduce the lumpiness we don't use the write barriers
    // invoked since last step directly to determine the amount of work to do.
    intptr_t bytes_to_process =
        marking_speed_ *
        Max(allocated_, write_barriers_invoked_since_last_step_);
    Step(bytes_to_process, GC_VIA_STACK_GUARD, FORCE_COMPLETION,
         StepOrigin::kV8);
  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        kMaxStepSizeInMs,
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    bytes_to_process = Min(bytes_to_process, max_step_size);

    intptr_t bytes_processed = 0;
    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
      // Steps performed in tasks have put us ahead of schedule.
      // We skip processing of marking dequeue here and thus
      // shift marking time from inside V8 to standalone tasks.
      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
      bytes_processed = bytes_to_process;
    } else {
      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
                             FORCE_COMPLETION, StepOrigin::kV8);
    }
    bytes_allocated_ -= Min(bytes_allocated_, bytes_to_process);
  }
}

void IncrementalMarking::Step(intptr_t bytes_to_process,
                              CompletionAction action,
                              ForceCompletionAction completion,
                              StepOrigin step_origin) {
size_t IncrementalMarking::Step(size_t bytes_to_process,
                                CompletionAction action,
                                ForceCompletionAction completion,
                                StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  bytes_scanned_ += bytes_to_process;

  allocated_ = 0;
  write_barriers_invoked_since_last_step_ = 0;

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  intptr_t bytes_processed = 0;
  size_t bytes_processed = 0;
  if (state_ == MARKING) {
    const bool incremental_wrapper_tracing =
        FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
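Restated as a formula (a paraphrase of StepSizeToMakeProgress in the hunk above, using the patch's constants kRampUpIntervalMs = 300, kTargetStepCount = 128, kAllocatedThreshold = 64 KB):

\[
\text{progress\_step}(t) = \min\!\left(\frac{t - t_{\text{start}}}{300\,\text{ms}},\, 1\right) \cdot \max\!\left(\frac{\text{initial\_old\_generation\_size}}{128},\, 64\,\text{KB}\right)
\]

and the per-step budget in AdvanceIncrementalMarkingOnAllocation is the bytes allocated in old space since the last step plus progress_step(t). For example, with an assumed old generation of 256 MB at the start of marking, a step taken 150 ms later is budgeted 0.5 * max(2 MB, 64 KB) = 1 MB of progress marking on top of whatever was allocated since the previous step.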
@@ -1228,17 +1161,9 @@ void IncrementalMarking::Step(intptr_t bytes_to_process,
        heap_->mark_compact_collector()->marking_deque()->IsEmpty());
    bool wrapper_work_left = incremental_wrapper_tracing;
    if (!process_wrappers) {
      if (step_origin == StepOrigin::kV8 &&
          bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
        // Steps performed in tasks have put us ahead of schedule.
        // We skip processing of marking dequeue here and thus
        // shift marking time from inside V8 to standalone tasks.
        bytes_marked_ahead_of_schedule_ -= bytes_to_process;
      } else {
        bytes_processed = ProcessMarkingDeque(bytes_to_process);
        if (step_origin == StepOrigin::kTask) {
          bytes_marked_ahead_of_schedule_ += bytes_processed;
        }
      bytes_processed = ProcessMarkingDeque(bytes_to_process);
      if (step_origin == StepOrigin::kTask) {
        bytes_marked_ahead_of_schedule_ += bytes_processed;
      }
    } else {
      const double wrapper_deadline =
@@ -1267,12 +1192,6 @@ void IncrementalMarking::Step(intptr_t bytes_to_process,
    }
  }

  steps_count_++;

  // Speed up marking if we are marking too slow or if we are almost done
  // with marking.
  SpeedUp();

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
@@ -1281,30 +1200,11 @@ void IncrementalMarking::Step(intptr_t bytes_to_process,
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s %d bytes (%d) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        static_cast<int>(bytes_processed), static_cast<int>(bytes_to_process),
        duration);
        "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
        bytes_to_process, duration);
  }
}


void IncrementalMarking::ResetStepCounters() {
  steps_count_ = 0;
  old_generation_space_available_at_start_of_incremental_ =
      SpaceLeftInOldSpace();
  old_generation_space_used_at_start_of_incremental_ =
      heap_->PromotedTotalSize();
  bytes_rescanned_ = 0;
  marking_speed_ = kInitialMarkingSpeed;
  bytes_scanned_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
  bytes_marked_ahead_of_schedule_ = 0;
}


int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
  return bytes_processed;
}


@@ -108,31 +108,19 @@ class IncrementalMarking {
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that many
  // heavy (color-checking) write barriers have been invoked.
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;
  static const size_t kAllocatedThreshold = 64 * KB;

  static const intptr_t kStepSizeInMs = 1;
  static const int kStepSizeInMs = 1;
  static const int kMaxStepSizeInMs = 5;

  // This is the upper bound for how many times we allow finalization of
  // incremental marking to be postponed.
  static const size_t kMaxIdleMarkingDelayCounter = 3;
  static const int kMaxIdleMarkingDelayCounter = 3;

  void FinalizeSweeping();

  void NotifyAllocatedBytes(intptr_t allocated_bytes);

  void Step(intptr_t bytes_to_process, CompletionAction action,
            ForceCompletionAction completion, StepOrigin origin);
  size_t Step(size_t bytes_to_process, CompletionAction action,
              ForceCompletionAction completion, StepOrigin step_origin);

  inline void RestartIfNotMarking();

@@ -175,8 +163,6 @@ class IncrementalMarking {

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate();

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }
@@ -235,7 +221,7 @@ class IncrementalMarking {
          incremental_marking_(incremental_marking) {}

    void Step(int bytes_allocated, Address, size_t) override {
      incremental_marking_.NotifyAllocatedBytes(bytes_allocated);
      incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
    }

   private:
@@ -244,10 +230,6 @@ class IncrementalMarking {

  int64_t SpaceLeftInOldSpace();

  void SpeedUp();

  void ResetStepCounters();

  void StartMarking();

  void StartBlackAllocation();
@@ -283,38 +265,36 @@ class IncrementalMarking {

  void IncrementIdleMarkingDelayCounter();

  void AdvanceIncrementalMarkingOnAllocation();

  size_t StepSizeToKeepUpWithAllocations();
  size_t StepSizeToMakeProgress();

  Heap* heap_;

  Observer observer_;

  State state_;
  bool is_compacting_;

  int steps_count_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;
  intptr_t bytes_marked_ahead_of_schedule_;
  size_t idle_marking_delay_counter_;

  int unscanned_bytes_of_large_object_;

  bool was_activated_;

  bool black_allocation_;

  bool finalize_marking_completed_;
  double start_time_ms_;
  size_t initial_old_generation_size_;
  size_t old_generation_allocation_counter_;
  size_t bytes_allocated_;
  size_t bytes_marked_ahead_of_schedule_;
  size_t unscanned_bytes_of_large_object_;

  int idle_marking_delay_counter_;
  int incremental_marking_finalization_rounds_;

  bool is_compacting_;
  bool should_hurry_;
  bool was_activated_;
  bool black_allocation_;
  bool finalize_marking_completed_;

  GCRequestType request_type_;

  IncrementalMarkingJob incremental_marking_job_;
  Observer new_generation_observer_;
  Observer old_generation_observer_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};

@@ -3190,17 +3190,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
    Mode mode) {
  Label object_is_black, need_incremental, need_incremental_pop_object;

  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
  __ and_(regs_.scratch0(), regs_.object());
  __ mov(regs_.scratch1(),
         Operand(regs_.scratch0(),
                 MemoryChunk::kWriteBarrierCounterOffset));
  __ sub(regs_.scratch1(), Immediate(1));
  __ mov(Operand(regs_.scratch0(),
                 MemoryChunk::kWriteBarrierCounterOffset),
         regs_.scratch1());
  __ j(negative, &need_incremental);

  // Let's look at the color of the object: If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(),

@@ -3129,17 +3129,6 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
  Label need_incremental;
  Label need_incremental_pop_object;

  __ movp(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
  __ andp(regs_.scratch0(), regs_.object());
  __ movp(regs_.scratch1(),
          Operand(regs_.scratch0(),
                  MemoryChunk::kWriteBarrierCounterOffset));
  __ subp(regs_.scratch1(), Immediate(1));
  __ movp(Operand(regs_.scratch0(),
                  MemoryChunk::kWriteBarrierCounterOffset),
          regs_.scratch1());
  __ j(negative, &need_incremental);

  // Let's look at the color of the object: If it is not black we don't have
  // to inform the incremental marker.
  __ JumpIfBlack(regs_.object(),

@@ -6209,7 +6209,8 @@ TEST(OldSpaceAllocationCounter) {
  CHECK_LE(kSize, counter4 - counter3);
  // Test counter overflow.
  size_t max_counter = -1;
  heap->set_old_generation_allocation_counter(max_counter - 10 * kSize);
  heap->set_old_generation_allocation_counter_at_last_gc(max_counter -
                                                         10 * kSize);
  size_t start = heap->OldGenerationAllocationCounter();
  for (int i = 0; i < 20; i++) {
    AllocateInSpace(isolate, kSize, OLD_SPACE);