Revert of [heap] Simplify heuristics for incremental step size. (patchset #5 id:180001 of https://codereview.chromium.org/2290333002/ )

Reason for revert:
Regressed GC times in v8.infinite_scroll_tbmv2 benchmarks.

Original issue's description:
> [heap] Simplify heuristics for incremental step size.
>
> This patch removes the code for speeding up marking.
>
> Now the step size depends on allocated bytes and
> invoked write barriers.
>
> We also ensure that the step size is large enough to
> justify the overhead of interrupting the generated code.
>
> BUG=chromium:616434
> LOG=NO
>
> Committed: https://crrev.com/71a793e4b1026e69d4009df0a419fe260fe1a235
> Cr-Commit-Position: refs/heads/master@{#39067}

TBR=mlippautz@chromium.org
# Skipping CQ checks because original CL landed less than 1 day ago.
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=chromium:616434

Review-Url: https://codereview.chromium.org/2304613002
Cr-Commit-Position: refs/heads/master@{#39098}
Author: ulan, 2016-09-01 10:28:46 -07:00 (committed by Commit bot)
Parent: 5f8a6ec4b1
Commit: d09e026a9e
4 changed files with 155 additions and 23 deletions
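To make the behavioral change easier to follow before reading the diff, the two step-size heuristics can be sketched as standalone functions. This is only an illustration distilled from the expressions and constants visible in this revert; EstimateMarkingStepSize is stubbed with an assumed duration-times-speed approximation, and none of this is the actual V8 implementation. A worked numeric example follows the IncrementalMarking::Step hunk further down.

#include <algorithm>
#include <cstdint>

// Constants as they appear in the incremental-marking.h hunk of this revert.
constexpr int64_t kBytesToMarkPerAllocatedByte = 3;     // removed by this revert
constexpr int64_t kBytesToMarkPerWriteBarrier = 10;     // removed by this revert
constexpr int64_t kMinIncrementalStepDurationInMs = 1;  // removed by this revert

// Stand-in for GCIdleTimeHandler::EstimateMarkingStepSize; the assumption here
// is simply "step duration in ms times recorded marking speed in bytes/ms",
// which is not the real implementation.
int64_t EstimateMarkingStepSize(int64_t duration_ms,
                                int64_t marking_speed_bytes_per_ms) {
  return duration_ms * marking_speed_bytes_per_ms;
}

// Heuristic introduced by the reverted CL: marking work grows with allocated
// bytes and write-barrier invocations, bounded below so that interrupting the
// generated code pays off.
int64_t SimplifiedStepSize(int64_t allocated_bytes, int64_t write_barriers,
                           int64_t marking_speed_bytes_per_ms) {
  int64_t min_bytes_to_process = EstimateMarkingStepSize(
      kMinIncrementalStepDurationInMs, marking_speed_bytes_per_ms);
  return std::max(min_bytes_to_process,
                  kBytesToMarkPerAllocatedByte * allocated_bytes +
                      kBytesToMarkPerWriteBarrier * write_barriers);
}

// Heuristic restored by this revert: a marking_speed_ factor (initially 1,
// raised by NotifyOfHighPromotionRate() and SpeedUp()) scales the larger of
// the two signals.
int64_t RestoredStepSize(int64_t allocated_bytes, int64_t write_barriers,
                         int64_t marking_speed) {
  return marking_speed * std::max(allocated_bytes, write_barriers);
}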


@@ -1307,6 +1307,13 @@ bool Heap::PerformGarbageCollection(
int start_new_space_size = Heap::new_space()->SizeAsInt();
if (IsHighSurvivalRate()) {
// We speed up the incremental marker if it is running so that it
// does not fall behind the rate of promotion, which would cause a
// constantly growing old space.
incremental_marking()->NotifyOfHighPromotionRate();
}
{
Heap::PretenuringScope pretenuring_scope(this);


@@ -1679,6 +1679,10 @@ class Heap {
// Flush the number to string cache.
void FlushNumberStringCache();
// TODO(hpayer): Allocation site pretenuring may make this method obsolete.
// Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
void ConfigureInitialOldGenerationSize();
bool HasLowYoungGenerationAllocationRate();


@@ -30,7 +30,13 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
observer_(*this, kAllocatedThreshold),
state_(STOPPED),
is_compacting_(false),
steps_count_(0),
old_generation_space_available_at_start_of_incremental_(0),
old_generation_space_used_at_start_of_incremental_(0),
bytes_rescanned_(0),
should_hurry_(false),
marking_speed_(0),
bytes_scanned_(0),
allocated_(0),
write_barriers_invoked_since_last_step_(0),
idle_marking_delay_counter_(0),
@@ -75,11 +81,9 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
int counter = chunk->write_barrier_counter();
if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
-    marking->write_barriers_invoked_since_last_step_ =
-        Min(kMaxWriteBarrierCounter,
-            marking->write_barriers_invoked_since_last_step_ +
-                MemoryChunk::kWriteBarrierCounterGranularity -
-                chunk->write_barrier_counter());
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
chunk->set_write_barrier_counter(
MemoryChunk::kWriteBarrierCounterGranularity);
}
@@ -463,6 +467,21 @@ void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
}
void IncrementalMarking::NotifyOfHighPromotionRate() {
if (IsMarking()) {
if (marking_speed_ < kFastMarking) {
if (FLAG_trace_gc) {
heap()->isolate()->PrintWithTimestamp(
"Increasing marking speed to %d "
"due to high promotion rate\n",
static_cast<int>(kFastMarking));
}
marking_speed_ = kFastMarking;
}
}
}
static void PatchIncrementalMarkingRecordWriteStubs(
Heap* heap, RecordWriteStub::Mode mode) {
UnseededNumberDictionary* stubs = heap->code_stubs();
@@ -1065,11 +1084,87 @@ void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
"old space step");
} else {
-    Step(allocated, GC_VIA_STACK_GUARD);
+    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
}
}
void IncrementalMarking::SpeedUp() {
bool speed_up = false;
if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Speed up marking after %d steps\n",
static_cast<int>(kMarkingSpeedAccellerationInterval));
}
speed_up = true;
}
bool space_left_is_very_small =
(old_generation_space_available_at_start_of_incremental_ < 10 * MB);
bool only_1_nth_of_space_that_was_available_still_left =
(SpaceLeftInOldSpace() * (marking_speed_ + 1) <
old_generation_space_available_at_start_of_incremental_);
if (space_left_is_very_small ||
only_1_nth_of_space_that_was_available_still_left) {
if (FLAG_trace_incremental_marking)
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Speed up marking because of low space left\n");
speed_up = true;
}
bool size_of_old_space_multiplied_by_n_during_marking =
(heap_->PromotedTotalSize() >
(marking_speed_ + 1) *
old_generation_space_used_at_start_of_incremental_);
if (size_of_old_space_multiplied_by_n_during_marking) {
speed_up = true;
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Speed up marking because of heap size "
"increase\n");
}
}
int64_t promoted_during_marking =
heap_->PromotedTotalSize() -
old_generation_space_used_at_start_of_incremental_;
intptr_t delay = marking_speed_ * MB;
intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
// We try to scan at at least twice the speed that we are allocating.
if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Speed up marking because marker was not "
"keeping up\n");
}
speed_up = true;
}
if (speed_up) {
if (state_ != MARKING) {
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Postponing speeding up marking until marking "
"starts\n");
}
} else {
marking_speed_ += kMarkingSpeedAccelleration;
marking_speed_ = static_cast<int>(
Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
if (FLAG_trace_incremental_marking) {
heap()->isolate()->PrintWithTimestamp(
"[IncrementalMarking] Marking speed increased to %d\n",
marking_speed_);
}
}
}
}
void IncrementalMarking::FinalizeSweeping() {
DCHECK(state_ == SWEEPING);
if (heap_->mark_compact_collector()->sweeping_in_progress() &&
@@ -1078,6 +1173,7 @@ void IncrementalMarking::FinalizeSweeping() {
heap_->mark_compact_collector()->EnsureSweepingCompleted();
}
if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
bytes_scanned_ = 0;
StartMarking();
}
}
@@ -1115,24 +1211,22 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
double start = heap_->MonotonicallyIncreasingTimeInMs();
-  // Make sure that the step size is large enough to justify the overhead
-  // of interrupting the generated code to perform the step.
-  intptr_t min_bytes_to_process = GCIdleTimeHandler::EstimateMarkingStepSize(
-      kMinIncrementalStepDurationInMs,
-      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
  // The marking speed is driven either by the allocation rate or by the rate
  // at which we are having to check the color of objects in the write
  // barrier.
  // It is possible for a tight non-allocating loop to run a lot of write
  // barriers before we get here and check them (marking can only take place
-  // on allocation).
-  intptr_t bytes_to_process = Max(
-      min_bytes_to_process, kBytesToMarkPerAllocatedByte * allocated_ +
-                                kBytesToMarkPerWriteBarrier *
-                                    write_barriers_invoked_since_last_step_);
+  // on
+  // allocation), so to reduce the lumpiness we don't use the write barriers
+  // invoked since last step directly to determine the amount of work to do.
+  intptr_t bytes_to_process =
+      marking_speed_ *
+      Max(allocated_, write_barriers_invoked_since_last_step_);
  allocated_ = 0;
  write_barriers_invoked_since_last_step_ = 0;
+  bytes_scanned_ += bytes_to_process;
if (state_ == SWEEPING) {
TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
FinalizeSweeping();
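A worked example for the hunk above, using hypothetical counter values; it only evaluates the two formulas shown in this hunk and ignores min_bytes_to_process and the surrounding state checks.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Hypothetical counters at the time of a step: 64 KB allocated and 3000
  // color-checking write barriers recorded since the last step.
  const int64_t allocated = 64 * 1024;
  const int64_t write_barriers = 3000;
  // Formula removed by this revert (kBytesToMarkPerAllocatedByte = 3,
  // kBytesToMarkPerWriteBarrier = 10), ignoring min_bytes_to_process.
  const int64_t simplified = 3 * allocated + 10 * write_barriers;
  // Formula restored by this revert, with marking_speed_ = kInitialMarkingSpeed.
  const int64_t restored = 1 * std::max(allocated, write_barriers);
  std::printf("simplified heuristic: %lld bytes per step\n",
              static_cast<long long>(simplified));  // 226608
  std::printf("restored heuristic:   %lld bytes per step\n",
              static_cast<long long>(restored));    // 65536
  return 0;
}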
@@ -1181,6 +1275,12 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
}
}
steps_count_++;
// Speed up marking if we are marking too slow or if we are almost done
// with marking.
SpeedUp();
double end = heap_->MonotonicallyIncreasingTimeInMs();
double duration = (end - start);
// Note that we report zero bytes here when sweeping was in progress or
@@ -1193,7 +1293,14 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
void IncrementalMarking::ResetStepCounters() {
allocated_ = 0;
steps_count_ = 0;
old_generation_space_available_at_start_of_incremental_ =
SpaceLeftInOldSpace();
old_generation_space_used_at_start_of_incremental_ =
heap_->PromotedTotalSize();
bytes_rescanned_ = 0;
marking_speed_ = kInitialMarkingSpeed;
bytes_scanned_ = 0;
write_barriers_invoked_since_last_step_ = 0;
}


@@ -123,12 +123,16 @@ class IncrementalMarking {
// heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
static const intptr_t kWriteBarriersInvokedThreshold = 32768;
-  // Cap the write barrier counter to avoid overflows.
-  static const intptr_t kMaxWriteBarrierCounter = 1 << 20;
-  // These constants are arbitrary and chosen based on benchmarks.
-  static const intptr_t kBytesToMarkPerAllocatedByte = 3;
-  static const intptr_t kBytesToMarkPerWriteBarrier = 10;
-  static const intptr_t kMinIncrementalStepDurationInMs = 1;
+  // Start off by marking this many times more memory than has been allocated.
+  static const intptr_t kInitialMarkingSpeed = 1;
+  // But if we are promoting a lot of data we need to mark faster to keep up
+  // with the data that is entering the old space through promotion.
+  static const intptr_t kFastMarking = 3;
+  // After this many steps we increase the marking/allocating factor.
+  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
+  // This is how much we increase the marking/allocating factor by.
+  static const intptr_t kMarkingSpeedAccelleration = 2;
+  static const intptr_t kMaxMarkingSpeed = 1000;
// This is the upper bound for how many times we allow finalization of
// incremental marking to be postponed.
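For reference, a small standalone sketch (not V8 code) that replays the marking_speed_ growth rule from SpeedUp() earlier in this diff, using the constants restored in this hunk; the trigger conditions that decide whether a speed-up happens are omitted.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Constants from this hunk.
  const int kMarkingSpeedAccelleration = 2;
  const intptr_t kMaxMarkingSpeed = 1000;
  int marking_speed = 1;  // kInitialMarkingSpeed
  // Replay the update performed by SpeedUp() each time it decides to
  // accelerate: add kMarkingSpeedAccelleration, then scale by 1.3, capped.
  for (int i = 1; i <= 24 && marking_speed < kMaxMarkingSpeed; ++i) {
    marking_speed += kMarkingSpeedAccelleration;
    marking_speed = static_cast<int>(std::min(
        kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed * 1.3)));
    std::printf("speed-up %2d: marking_speed_ = %d\n", i, marking_speed);
  }
  return 0;
}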
@@ -183,6 +187,8 @@ class IncrementalMarking {
void ActivateGeneratedStub(Code* stub);
void NotifyOfHighPromotionRate();
void NotifyIncompleteScanOfObject(int unscanned_bytes) {
unscanned_bytes_of_large_object_ = unscanned_bytes;
}
@@ -249,6 +255,8 @@ class IncrementalMarking {
int64_t SpaceLeftInOldSpace();
void SpeedUp();
void ResetStepCounters();
void StartMarking();
@@ -293,7 +301,13 @@ class IncrementalMarking {
State state_;
bool is_compacting_;
int steps_count_;
int64_t old_generation_space_available_at_start_of_incremental_;
int64_t old_generation_space_used_at_start_of_incremental_;
int64_t bytes_rescanned_;
bool should_hurry_;
int marking_speed_;
intptr_t bytes_scanned_;
intptr_t allocated_;
intptr_t write_barriers_invoked_since_last_step_;
size_t idle_marking_delay_counter_;