diff --git a/src/heap.cc b/src/heap.cc index 4ce1816673..48e8b56762 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -145,7 +145,6 @@ Heap::Heap() number_idle_notifications_(0), last_idle_notification_gc_count_(0), last_idle_notification_gc_count_init_(false), - idle_notification_will_schedule_next_gc_(false), mark_sweeps_since_idle_round_started_(0), ms_count_at_last_idle_notification_(0), gc_count_at_last_idle_gc_(0), @@ -504,11 +503,17 @@ bool Heap::CollectGarbage(AllocationSpace space, !incremental_marking()->IsStopped() && !incremental_marking()->should_hurry() && FLAG_incremental_marking_steps) { - if (FLAG_trace_incremental_marking) { - PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); + // Make progress in incremental marking. + const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB; + incremental_marking()->Step(kStepSizeWhenDelayedByScavenge, + IncrementalMarking::NO_GC_VIA_STACK_GUARD); + if (!incremental_marking()->IsComplete()) { + if (FLAG_trace_incremental_marking) { + PrintF("[IncrementalMarking] Delaying MarkSweep.\n"); + } + collector = SCAVENGER; + collector_reason = "incremental marking delaying mark-sweep"; } - collector = SCAVENGER; - collector_reason = "incremental marking delaying mark-sweep"; } bool next_gc_likely_to_collect_more = false; @@ -4817,10 +4822,8 @@ void Heap::EnsureHeapIsIterable() { void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) { - // This flag prevents incremental marking from requesting GC via stack guard - idle_notification_will_schedule_next_gc_ = true; - incremental_marking()->Step(step_size); - idle_notification_will_schedule_next_gc_ = false; + incremental_marking()->Step(step_size, + IncrementalMarking::NO_GC_VIA_STACK_GUARD); if (incremental_marking()->IsComplete()) { bool uncommit = false; diff --git a/src/heap.h b/src/heap.h index 2bd037f15b..0391e0e526 100644 --- a/src/heap.h +++ b/src/heap.h @@ -1569,10 +1569,6 @@ class Heap { // The roots that have an index less than this are always in old space. 
static const int kOldSpaceRoots = 0x20; - bool idle_notification_will_schedule_next_gc() { return idle_notification_will_schedule_next_gc_; } - uint32_t HashSeed() { uint32_t seed = static_cast<uint32_t>(hash_seed()->value()); ASSERT(FLAG_randomize_hashes || seed == 0); @@ -2033,7 +2029,6 @@ class Heap { unsigned int last_idle_notification_gc_count_; bool last_idle_notification_gc_count_init_; - bool idle_notification_will_schedule_next_gc_; int mark_sweeps_since_idle_round_started_; int ms_count_at_last_idle_notification_; unsigned int gc_count_at_last_idle_gc_; diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc index 8fe89b4a98..f3255e21eb 100644 --- a/src/incremental-marking.cc +++ b/src/incremental-marking.cc @@ -743,7 +743,7 @@ void IncrementalMarking::Finalize() { } -void IncrementalMarking::MarkingComplete() { +void IncrementalMarking::MarkingComplete(CompletionAction action) { state_ = COMPLETE; // We will set the stack guard to request a GC now. This will mean the rest // of the GC gets performed as soon as possible (we can't do a GC here in a @@ -754,13 +754,14 @@ void IncrementalMarking::MarkingComplete() { if (FLAG_trace_incremental_marking) { PrintF("[IncrementalMarking] Complete (normal).\n"); } - if (!heap_->idle_notification_will_schedule_next_gc()) { + if (action == GC_VIA_STACK_GUARD) { heap_->isolate()->stack_guard()->RequestGC(); } } -void IncrementalMarking::Step(intptr_t allocated_bytes) { +void IncrementalMarking::Step(intptr_t allocated_bytes, + CompletionAction action) { if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking || !FLAG_incremental_marking_steps || @@ -833,7 +834,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes) { Marking::MarkBlack(obj_mark_bit); MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size); } - if (marking_deque_.IsEmpty()) MarkingComplete(); + if (marking_deque_.IsEmpty()) MarkingComplete(action); } allocated_ = 0; diff --git a/src/incremental-marking.h 
b/src/incremental-marking.h index 4f8fa6b127..8cbe6c18e7 100644 --- a/src/incremental-marking.h +++ b/src/incremental-marking.h @@ -46,6 +46,11 @@ class IncrementalMarking { COMPLETE }; + enum CompletionAction { + GC_VIA_STACK_GUARD, + NO_GC_VIA_STACK_GUARD + }; + explicit IncrementalMarking(Heap* heap); void TearDown(); @@ -82,7 +87,7 @@ class IncrementalMarking { void Abort(); - void MarkingComplete(); + void MarkingComplete(CompletionAction action); // It's hard to know how much work the incremental marker should do to make // progress in the face of the mutator creating new work for it. We start @@ -102,10 +107,11 @@ class IncrementalMarking { static const intptr_t kMaxAllocationMarkingFactor = 1000; void OldSpaceStep(intptr_t allocated) { - Step(allocated * kFastMarking / kInitialAllocationMarkingFactor); + Step(allocated * kFastMarking / kInitialAllocationMarkingFactor, + GC_VIA_STACK_GUARD); } - void Step(intptr_t allocated); + void Step(intptr_t allocated, CompletionAction action); inline void RestartIfNotMarking() { if (state_ == COMPLETE) { diff --git a/src/spaces.cc b/src/spaces.cc index a404b1e75e..6144464304 100644 --- a/src/spaces.cc +++ b/src/spaces.cc @@ -1234,13 +1234,15 @@ MaybeObject* NewSpace::SlowAllocateRaw(int size_in_bytes) { allocation_info_.limit + inline_allocation_limit_step_, high); int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_); - heap()->incremental_marking()->Step(bytes_allocated); + heap()->incremental_marking()->Step( + bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); top_on_previous_step_ = new_top; return AllocateRaw(size_in_bytes); } else if (AddFreshPage()) { // Switched to new page. Try allocating again. 
int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_); - heap()->incremental_marking()->Step(bytes_allocated); + heap()->incremental_marking()->Step( + bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD); top_on_previous_step_ = to_space_.page_low(); return AllocateRaw(size_in_bytes); } else { diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc index 999e2c6651..d6f08c3eac 100644 --- a/test/cctest/test-heap.cc +++ b/test/cctest/test-heap.cc @@ -1521,17 +1521,13 @@ TEST(InstanceOfStubWriteBarrier) { while (!Marking::IsBlack(Marking::MarkBitFrom(f->code())) && !marking->IsStopped()) { - marking->Step(MB); + // Discard any pending GC requests otherwise we will get GC when we enter + // code below. + marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD); } CHECK(marking->IsMarking()); - // Discard any pending GC requests otherwise we will get GC when we enter - // code below. - if (ISOLATE->stack_guard()->IsGCRequest()) { - ISOLATE->stack_guard()->Continue(GC_REQUEST); - } - { v8::HandleScope scope; v8::Handle<v8::Object> global = v8::Context::GetCurrent()->Global();