Make the speed of incremental marking depend also on the rate
at which we are hitting expensive write barrier operations,
not just on the rate of allocation.

Review URL: https://chromiumcodereview.appspot.com/10974003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12618 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
erik.corry@gmail.com 2012-09-26 11:35:42 +00:00
parent 16782af657
commit e8ffc2bebd
9 changed files with 121 additions and 38 deletions

src/arm/code-stubs-arm.cc

@@ -7431,6 +7431,16 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_scratch;
+  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+  __ ldr(regs_.scratch1(),
+         MemOperand(regs_.scratch0(),
+                    MemoryChunk::kWriteBarrierCounterOffset));
+  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC);
+  __ str(regs_.scratch1(),
+         MemOperand(regs_.scratch0(),
+                    MemoryChunk::kWriteBarrierCounterOffset));
+  __ b(mi, &need_incremental);
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);

src/heap.cc

@@ -5133,7 +5133,8 @@ bool Heap::IdleNotification(int hint) {
// The size factor is in range [5..250]. The numbers here are chosen from
// experiments. If you change them, make sure to test with
// chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*
-  intptr_t step_size = size_factor * IncrementalMarking::kAllocatedThreshold;
+  intptr_t step_size =
+      size_factor * IncrementalMarking::kAllocatedThreshold;
if (contexts_disposed_ > 0) {
if (hint >= kMaxHint) {

src/ia32/code-stubs-ia32.cc

@@ -7326,6 +7326,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Mode mode) {
Label object_is_black, need_incremental, need_incremental_pop_object;
+  __ mov(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+  __ and_(regs_.scratch0(), regs_.object());
+  __ mov(regs_.scratch1(),
+         Operand(regs_.scratch0(),
+                 MemoryChunk::kWriteBarrierCounterOffset));
+  __ sub(regs_.scratch1(), Immediate(1));
+  __ mov(Operand(regs_.scratch0(),
+                 MemoryChunk::kWriteBarrierCounterOffset),
+         regs_.scratch1());
+  __ j(negative, &need_incremental);
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),

src/incremental-marking-inl.h

@@ -111,7 +111,7 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
if (FLAG_trace_gc) {
PrintPID("Hurrying incremental marking because of lack of progress\n");
}
-    allocation_marking_factor_ = kMaxAllocationMarkingFactor;
+    marking_speed_ = kMaxMarkingSpeed;
}
}

src/incremental-marking.cc

@@ -52,7 +52,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
steps_count_since_last_gc_(0),
steps_took_since_last_gc_(0),
should_hurry_(false),
-      allocation_marking_factor_(0),
+      marking_speed_(0),
allocated_(0),
no_marking_scope_depth_(0) {
}
@@ -81,17 +81,19 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
Object* value,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
-  // Fast cases should already be covered by RecordWriteStub.
-  ASSERT(value->IsHeapObject());
-  ASSERT(!value->IsHeapNumber());
-  ASSERT(!value->IsString() ||
-         value->IsConsString() ||
-         value->IsSlicedString());
-  ASSERT(Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(value))));
IncrementalMarking* marking = isolate->heap()->incremental_marking();
ASSERT(!marking->is_compacting_);
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
marking->RecordWrite(obj, NULL, value);
}
@@ -99,8 +101,20 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
void IncrementalMarking::RecordWriteForEvacuationFromCode(HeapObject* obj,
Object** slot,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
ASSERT(marking->is_compacting_);
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
marking->RecordWrite(obj, slot, *slot);
}
@@ -773,11 +787,25 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
allocated_ += allocated_bytes;
-  if (allocated_ < kAllocatedThreshold) return;
+  if (allocated_ < kAllocatedThreshold &&
+      write_barriers_invoked_since_last_step_ <
+          kWriteBarriersInvokedThreshold) {
+    return;
+  }
if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
-  intptr_t bytes_to_process = allocated_ * allocation_marking_factor_;
+  // The marking speed is driven either by the allocation rate or by the rate
+  // at which we are having to check the color of objects in the write barrier.
+  // It is possible for a tight non-allocating loop to run a lot of write
+  // barriers before we get here and check them (marking can only take place on
+  // allocation), so to reduce the lumpiness we don't use the write barriers
+  // invoked since last step directly to determine the amount of work to do.
+  intptr_t bytes_to_process =
+      marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
+  allocated_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
bytes_scanned_ += bytes_to_process;
double start = 0;
@@ -832,17 +860,15 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
-  allocated_ = 0;
steps_count_++;
steps_count_since_last_gc_++;
bool speed_up = false;
-  if ((steps_count_ % kAllocationMarkingFactorSpeedupInterval) == 0) {
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
if (FLAG_trace_gc) {
PrintPID("Speed up marking after %d steps\n",
-               static_cast<int>(kAllocationMarkingFactorSpeedupInterval));
+               static_cast<int>(kMarkingSpeedAccellerationInterval));
}
speed_up = true;
}
@@ -851,7 +877,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
(old_generation_space_available_at_start_of_incremental_ < 10 * MB);
bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (allocation_marking_factor_ + 1) <
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
old_generation_space_available_at_start_of_incremental_);
if (space_left_is_very_small ||
@@ -862,7 +888,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
bool size_of_old_space_multiplied_by_n_during_marking =
(heap_->PromotedTotalSize() >
-       (allocation_marking_factor_ + 1) *
+       (marking_speed_ + 1) *
old_generation_space_used_at_start_of_incremental_);
if (size_of_old_space_multiplied_by_n_during_marking) {
speed_up = true;
@@ -873,7 +899,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
int64_t promoted_during_marking = heap_->PromotedTotalSize()
- old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = allocation_marking_factor_ * MB;
+  intptr_t delay = marking_speed_ * MB;
intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
// We try to scan at at least twice the speed that we are allocating.
@@ -890,12 +916,12 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
PrintPID("Postponing speeding up marking until marking starts\n");
}
} else {
-      allocation_marking_factor_ += kAllocationMarkingFactorSpeedup;
-      allocation_marking_factor_ = static_cast<int>(
-          Min(kMaxAllocationMarkingFactor,
-              static_cast<intptr_t>(allocation_marking_factor_ * 1.3)));
+      marking_speed_ += kMarkingSpeedAccelleration;
+      marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed,
+              static_cast<intptr_t>(marking_speed_ * 1.3)));
if (FLAG_trace_gc) {
PrintPID("Marking speed increased to %d\n", allocation_marking_factor_);
PrintPID("Marking speed increased to %d\n", marking_speed_);
}
}
}
@@ -921,8 +947,9 @@ void IncrementalMarking::ResetStepCounters() {
steps_count_since_last_gc_ = 0;
steps_took_since_last_gc_ = 0;
bytes_rescanned_ = 0;
-  allocation_marking_factor_ = kInitialAllocationMarkingFactor;
+  marking_speed_ = kInitialMarkingSpeed;
bytes_scanned_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
}

src/incremental-marking.h

@@ -95,21 +95,23 @@ class IncrementalMarking {
// progress in the face of the mutator creating new work for it. We start
// off at a moderate rate of work and gradually increase the speed of the
// incremental marker until it completes.
-  // Do some marking every time this much memory has been allocated.
+  // Do some marking every time this much memory has been allocated or that many
+  // heavy (color-checking) write barriers have been invoked.
static const intptr_t kAllocatedThreshold = 65536;
+  static const intptr_t kWriteBarriersInvokedThreshold = 65536;
// Start off by marking this many times more memory than has been allocated.
-  static const intptr_t kInitialAllocationMarkingFactor = 1;
+  static const intptr_t kInitialMarkingSpeed = 1;
// But if we are promoting a lot of data we need to mark faster to keep up
// with the data that is entering the old space through promotion.
static const intptr_t kFastMarking = 3;
// After this many steps we increase the marking/allocating factor.
-  static const intptr_t kAllocationMarkingFactorSpeedupInterval = 1024;
+  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
// This is how much we increase the marking/allocating factor by.
-  static const intptr_t kAllocationMarkingFactorSpeedup = 2;
-  static const intptr_t kMaxAllocationMarkingFactor = 1000;
+  static const intptr_t kMarkingSpeedAccelleration = 2;
+  static const intptr_t kMaxMarkingSpeed = 1000;
void OldSpaceStep(intptr_t allocated) {
-    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+    Step(allocated * kFastMarking / kInitialMarkingSpeed,
GC_VIA_STACK_GUARD);
}
@@ -211,13 +213,13 @@ class IncrementalMarking {
void NotifyOfHighPromotionRate() {
if (IsMarking()) {
-      if (allocation_marking_factor_ < kFastMarking) {
+      if (marking_speed_ < kFastMarking) {
if (FLAG_trace_gc) {
PrintPID("Increasing marking speed to %d "
"due to high promotion rate\n",
static_cast<int>(kFastMarking));
}
-        allocation_marking_factor_ = kFastMarking;
+        marking_speed_ = kFastMarking;
}
}
}
@@ -275,9 +277,10 @@ class IncrementalMarking {
double steps_took_since_last_gc_;
int64_t bytes_rescanned_;
bool should_hurry_;
-  int allocation_marking_factor_;
+  int marking_speed_;
intptr_t bytes_scanned_;
intptr_t allocated_;
+  intptr_t write_barriers_invoked_since_last_step_;
int no_marking_scope_depth_;

src/spaces.cc

@@ -447,6 +447,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->InitializeReservedMemory();
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
+  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);

View File

@@ -284,7 +284,9 @@ class Bitmap {
bool IsClean() {
for (int i = 0; i < CellsCount(); i++) {
-      if (cells()[i] != 0) return false;
+      if (cells()[i] != 0) {
+        return false;
+      }
}
return true;
}
@@ -373,6 +375,11 @@ class MemoryChunk {
return addr >= area_start() && addr <= area_end();
}
+  // Every n write barrier invocations we go to runtime even though
+  // we could have handled it in generated code. This lets us check
+  // whether we have hit the limit and should do some more marking.
+  static const int kWriteBarrierCounterGranularity = 500;
enum MemoryChunkFlags {
IS_EXECUTABLE,
ABOUT_TO_BE_FREED,
@@ -468,6 +475,15 @@ class MemoryChunk {
return live_byte_count_;
}
+  int write_barrier_counter() {
+    return static_cast<int>(write_barrier_counter_);
+  }
+
+  void set_write_barrier_counter(int counter) {
+    write_barrier_counter_ = counter;
+  }
static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
}
@@ -488,9 +504,11 @@ class MemoryChunk {
static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-  static const size_t kHeaderSize =
+  static const size_t kWriteBarrierCounterOffset =
      kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(MAP_POINTER_ALIGN(kHeaderSize + Bitmap::kSize));
@@ -625,6 +643,7 @@ class MemoryChunk {
int live_byte_count_;
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
+  intptr_t write_barrier_counter_;
static MemoryChunk* Initialize(Heap* heap,
Address base,

src/x64/code-stubs-x64.cc

@@ -6276,6 +6276,17 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
Label need_incremental;
Label need_incremental_pop_object;
+  __ movq(regs_.scratch0(), Immediate(~Page::kPageAlignmentMask));
+  __ and_(regs_.scratch0(), regs_.object());
+  __ movq(regs_.scratch1(),
+          Operand(regs_.scratch0(),
+                  MemoryChunk::kWriteBarrierCounterOffset));
+  __ subq(regs_.scratch1(), Immediate(1));
+  __ movq(Operand(regs_.scratch0(),
+                  MemoryChunk::kWriteBarrierCounterOffset),
+          regs_.scratch1());
+  __ j(negative, &need_incremental);
// Let's look at the color of the object: If it is not black we don't have
// to inform the incremental marker.
__ JumpIfBlack(regs_.object(),