Make progress in incremental marking if scavenge is delaying mark-sweep.
R=mstarzinger@chromium.org

Review URL: https://chromiumcodereview.appspot.com/9965054

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@11213 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
parent f3348e9745
commit 2b554f2448
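Why this change: a mark-sweep request that arrives while incremental marking is still running used to be downgraded to a scavenge unconditionally, so scavenge-heavy workloads could keep delaying the mark-sweep without marking ever advancing. The patch makes the downgrade conditional: take a bounded (1 MB) marking step first, and fall back to the scavenger only if marking is still incomplete. The sketch below is a standalone model of that decision, not V8 code; Marker, SelectCollector, and the work counts are illustrative stand-ins.

// Standalone sketch (not V8 code) of the new collector-selection logic
// in Heap::CollectGarbage. Marker models incremental marking.
#include <cstdint>
#include <cstdio>

enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
enum Collector { SCAVENGER, MARK_COMPACTOR };

struct Marker {
  int64_t remaining_work;  // bytes of marking work left
  bool IsComplete() const { return remaining_work <= 0; }
  // One bounded chunk of marking. On completion, a GC is requested
  // through the stack guard only if the caller allowed it.
  void Step(int64_t bytes, CompletionAction action) {
    remaining_work -= bytes;
    if (IsComplete() && action == GC_VIA_STACK_GUARD) {
      std::printf("stack guard: GC requested\n");
    }
  }
};

// Mirrors the patched Heap::CollectGarbage: push marking forward by 1 MB
// before deciding, and downgrade the collection to a scavenge only if
// marking is still incomplete afterwards.
Collector SelectCollector(Marker* marking) {
  const int64_t kStepSizeWhenDelayedByScavenge = 1 * 1024 * 1024;
  marking->Step(kStepSizeWhenDelayedByScavenge, NO_GC_VIA_STACK_GUARD);
  return marking->IsComplete() ? MARK_COMPACTOR : SCAVENGER;
}

int main() {
  Marker marking{3 * 1024 * 1024};  // 3 MB of outstanding marking work
  while (SelectCollector(&marking) == SCAVENGER) {
    std::printf("scavenge; marking still in progress\n");
  }
  std::printf("marking complete; mark-sweep can run\n");
}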
src/heap.cc
@@ -145,7 +145,6 @@ Heap::Heap()
       number_idle_notifications_(0),
       last_idle_notification_gc_count_(0),
       last_idle_notification_gc_count_init_(false),
-      idle_notification_will_schedule_next_gc_(false),
       mark_sweeps_since_idle_round_started_(0),
       ms_count_at_last_idle_notification_(0),
       gc_count_at_last_idle_gc_(0),
@@ -504,11 +503,17 @@ bool Heap::CollectGarbage(AllocationSpace space,
       !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() &&
       FLAG_incremental_marking_steps) {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
-    }
-    collector = SCAVENGER;
-    collector_reason = "incremental marking delaying mark-sweep";
+    // Make progress in incremental marking.
+    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (!incremental_marking()->IsComplete()) {
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+      }
+      collector = SCAVENGER;
+      collector_reason = "incremental marking delaying mark-sweep";
+    }
   }
 
   bool next_gc_likely_to_collect_more = false;
@@ -4817,10 +4822,8 @@ void Heap::EnsureHeapIsIterable() {
 
 
 void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
-  // This flag prevents incremental marking from requesting GC via stack guard
-  idle_notification_will_schedule_next_gc_ = true;
-  incremental_marking()->Step(step_size);
-  idle_notification_will_schedule_next_gc_ = false;
+  incremental_marking()->Step(step_size,
+                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);
 
   if (incremental_marking()->IsComplete()) {
     bool uncommit = false;
src/heap.h
@@ -1569,10 +1569,6 @@ class Heap {
   // The roots that have an index less than this are always in old space.
   static const int kOldSpaceRoots = 0x20;
 
-  bool idle_notification_will_schedule_next_gc() {
-    return idle_notification_will_schedule_next_gc_;
-  }
-
   uint32_t HashSeed() {
     uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
     ASSERT(FLAG_randomize_hashes || seed == 0);
@@ -2033,7 +2029,6 @@ class Heap {
   unsigned int last_idle_notification_gc_count_;
   bool last_idle_notification_gc_count_init_;
 
-  bool idle_notification_will_schedule_next_gc_;
   int mark_sweeps_since_idle_round_started_;
   int ms_count_at_last_idle_notification_;
   unsigned int gc_count_at_last_idle_gc_;
src/incremental-marking.cc
@@ -743,7 +743,7 @@ void IncrementalMarking::Finalize() {
 }
 
 
-void IncrementalMarking::MarkingComplete() {
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
   state_ = COMPLETE;
   // We will set the stack guard to request a GC now. This will mean the rest
   // of the GC gets performed as soon as possible (we can't do a GC here in a
@@ -754,13 +754,14 @@ void IncrementalMarking::MarkingComplete() {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Complete (normal).\n");
   }
-  if (!heap_->idle_notification_will_schedule_next_gc()) {
+  if (action == GC_VIA_STACK_GUARD) {
     heap_->isolate()->stack_guard()->RequestGC();
   }
 }
 
 
-void IncrementalMarking::Step(intptr_t allocated_bytes) {
+void IncrementalMarking::Step(intptr_t allocated_bytes,
+                              CompletionAction action) {
   if (heap_->gc_state() != Heap::NOT_IN_GC ||
       !FLAG_incremental_marking ||
       !FLAG_incremental_marking_steps ||
@@ -833,7 +834,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) {
       Marking::MarkBlack(obj_mark_bit);
       MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
     }
-    if (marking_deque_.IsEmpty()) MarkingComplete();
+    if (marking_deque_.IsEmpty()) MarkingComplete(action);
   }
 
   allocated_ = 0;
src/incremental-marking.h
@@ -46,6 +46,11 @@ class IncrementalMarking {
     COMPLETE
   };
 
+  enum CompletionAction {
+    GC_VIA_STACK_GUARD,
+    NO_GC_VIA_STACK_GUARD
+  };
+
   explicit IncrementalMarking(Heap* heap);
 
   void TearDown();
@@ -82,7 +87,7 @@ class IncrementalMarking {
 
   void Abort();
 
-  void MarkingComplete();
+  void MarkingComplete(CompletionAction action);
 
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it. We start
@@ -102,10 +107,11 @@ class IncrementalMarking {
   static const intptr_t kMaxAllocationMarkingFactor = 1000;
 
   void OldSpaceStep(intptr_t allocated) {
-    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor);
+    Step(allocated * kFastMarking / kInitialAllocationMarkingFactor,
+         GC_VIA_STACK_GUARD);
   }
 
-  void Step(intptr_t allocated);
+  void Step(intptr_t allocated, CompletionAction action);
 
   inline void RestartIfNotMarking() {
     if (state_ == COMPLETE) {
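Taken together with the other files in this commit, the header change replaces a mutable flag on the heap (set and reset around each Step call) with a completion policy that travels with the call itself. Allocation-driven steps (OldSpaceStep here, NewSpace::SlowAllocateRaw below) keep the pre-patch behaviour by passing GC_VIA_STACK_GUARD, while call sites that are already inside a GC decision (Heap::CollectGarbage, Heap::AdvanceIdleIncrementalMarking, and the cctest below) pass NO_GC_VIA_STACK_GUARD so that completing marking there does not schedule another GC through the stack guard.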
src/spaces.cc
@@ -1234,13 +1234,15 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) {
         allocation_info_.limit + inline_allocation_limit_step_,
         high);
     int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
+    heap()->incremental_marking()->Step(
+        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = new_top;
     return AllocateRaw(size_in_bytes);
   } else if (AddFreshPage()) {
     // Switched to new page. Try allocating again.
     int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated);
+    heap()->incremental_marking()->Step(
+        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
     top_on_previous_step_ = to_space_.page_low();
     return AllocateRaw(size_in_bytes);
   } else {
test/cctest/test-heap.cc
@@ -1521,17 +1521,13 @@ TEST(InstanceOfStubWriteBarrier) {
 
   while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) &&
          !marking->IsStopped()) {
-    marking->Step(MB);
+    // Discard any pending GC requests otherwise we will get GC when we enter
+    // code below.
+    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
   }
 
   CHECK(marking->IsMarking());
 
-  // Discard any pending GC requests otherwise we will get GC when we enter
-  // code below.
-  if (ISOLATE->stack_guard()->IsGCRequest()) {
-    ISOLATE->stack_guard()->Continue(GC_REQUEST);
-  }
-
   {
     v8::HandleScope scope;
     v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();