Reland "[heap] New heuristics for starting of incremental marking. (patchset #9 id:160001 of https://codereview.chromium.org/2364923002/ )"

This reverts commit a5440d1190.

BUG=chromium:616434
TBR=hpayer@chromium.org
LOG=NO

Review-Url: https://codereview.chromium.org/2379663002
Cr-Commit-Position: refs/heads/master@{#39838}
ulan 2016-09-28 10:51:48 -07:00 committed by Commit bot
parent c98fed4c78
commit 7e652694a0
9 changed files with 95 additions and 81 deletions
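
Taken together, the diffs below replace the old boolean activation check (IncrementalMarking::ShouldActivateEvenWithoutIdleNotification) with a three-way limit: kNoLimit, kSoftLimit (schedule an incremental-marking job task), and kHardLimit (start marking immediately). A minimal self-contained sketch of that decision, not V8 code: the plain parameters are hypothetical stand-ins for the Heap queries used in the real Heap::IncrementalMarkingLimitReached() below (OldGenerationSpaceAvailable(), new_space_->Capacity(), PromotedSpaceSizeOfObjects(), HighMemoryPressure(), ShouldOptimizeForMemoryUsage()).

#include <cstdint>

// Mirrors Heap::IncrementalMarkingLimit in the diff below.
enum class MarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

MarkingLimit LimitReached(bool can_be_activated, std::int64_t promoted_bytes,
                          bool stress_or_high_memory_pressure,
                          std::int64_t old_gen_available,
                          std::int64_t new_space_capacity,
                          bool optimize_for_memory) {
  const std::int64_t kActivationThreshold = 8 * 1024 * 1024;  // 8 MB (release)
  // Marking disabled, or the heap is still too small to bother.
  if (!can_be_activated || promoted_bytes < kActivationThreshold)
    return MarkingLimit::kNoLimit;
  // Stress testing or high memory pressure: start immediately.
  if (stress_or_high_memory_pressure) return MarkingLimit::kHardLimit;
  // Still far from the old-generation allocation limit: do nothing yet.
  if (old_gen_available > new_space_capacity) return MarkingLimit::kNoLimit;
  // At (or past) the limit, or memory-constrained: start right away.
  if (old_gen_available <= 0 || optimize_for_memory)
    return MarkingLimit::kHardLimit;
  // Close to the limit: schedule a task to start marking soon.
  return MarkingLimit::kSoftLimit;
}

The 8 MB threshold matches IncrementalMarking::kActivationThreshold in release builds; in debug builds it is 0, so small-heap tests still exercise incremental marking.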

src/heap/heap-inl.h

@@ -169,18 +169,6 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
   return old_space_->allocation_limit_address();
 }
 
-bool Heap::HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
-  if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
-  intptr_t adjusted_allocation_limit = limit - new_space_->Capacity();
-
-  if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
-
-  if (HighMemoryPressure()) return true;
-
-  return false;
-}
-
 void Heap::UpdateNewSpaceAllocationCounter() {
   new_space_allocation_counter_ = NewSpaceAllocationCounter();
 }
@@ -492,13 +480,6 @@ bool Heap::InOldSpaceSlow(Address address) {
   return old_space_->ContainsSlow(address);
 }
 
-bool Heap::OldGenerationAllocationLimitReached() {
-  if (!incremental_marking()->IsStopped() && !ShouldOptimizeForMemoryUsage()) {
-    return false;
-  }
-  return OldGenerationSpaceAvailable() < 0;
-}
-
 template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   if (promotion_mode == PROMOTE_MARKED) {

src/heap/heap.cc

@@ -266,13 +266,6 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     return MARK_COMPACTOR;
   }
 
-  // Is enough data promoted to justify a global GC?
-  if (OldGenerationAllocationLimitReached()) {
-    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
-    return MARK_COMPACTOR;
-  }
-
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
@@ -967,7 +960,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
   if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
       !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
-      OldGenerationAllocationLimitReached()) {
+      OldGenerationSpaceAvailable() <= 0) {
     if (!incremental_marking()->IsComplete() &&
         !mark_compact_collector()->marking_deque_.IsEmpty() &&
         !FLAG_gc_global) {
@@ -1079,12 +1072,17 @@ void Heap::StartIncrementalMarking(int gc_flags,
 
 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
     int gc_flags, const GCCallbackFlags gc_callback_flags) {
-  if (incremental_marking()->IsStopped() &&
-      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    StartIncrementalMarking(gc_flags, GarbageCollectionReason::kAllocationLimit,
-                            gc_callback_flags);
+  if (incremental_marking()->IsStopped()) {
+    IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
+    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
+      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
+      StartIncrementalMarking(gc_flags,
+                              GarbageCollectionReason::kAllocationLimit,
+                              gc_callback_flags);
+    }
   }
 }
 
 void Heap::StartIdleIncrementalMarking(GarbageCollectionReason gc_reason) {
   gc_idle_time_handler_->ResetNoProgressCounter();
@@ -5329,7 +5327,6 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
   }
 }
 
-
 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
@@ -5348,6 +5345,53 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
   }
 }
 
+// This predicate is called when an old generation space cannot allocated from
+// the free list and is about to add a new page. Returning false will cause a
+// major GC. It happens when the old generation allocation limit is reached and
+// - either we need to optimize for memory usage,
+// - or the incremental marking is not in progress and we cannot start it.
+bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
+  // We reached the old generation allocation limit.
+
+  if (ShouldOptimizeForMemoryUsage()) return false;
+
+  if (incremental_marking()->IsStopped() &&
+      IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
+    // We cannot start incremental marking.
+    return false;
+  }
+  return true;
+}
+
+// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
+// The kNoLimit means that either incremental marking is disabled or it is too
+// early to start incremental marking.
+// The kSoftLimit means that incremental marking should be started soon.
+// The kHardLimit means that incremental marking should be started immediately.
+Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
+  if (!incremental_marking()->CanBeActivated() ||
+      PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+    // Incremental marking is disabled or it is too early to start.
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
+      HighMemoryPressure()) {
+    // If there is high memory pressure or stress testing is enabled, then
+    // start marking immediately.
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+  if (old_generation_space_available > new_space_->Capacity()) {
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  // We are close to the allocation limit.
+  // Choose between the hard and the soft limits.
+  if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  return IncrementalMarkingLimit::kSoftLimit;
+}
+
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;

src/heap/heap.h

@@ -709,22 +709,11 @@ class Heap {
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
 
-  intptr_t old_generation_allocation_limit() const {
-    return old_generation_allocation_limit_;
-  }
-
-  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
-
   inline Address* NewSpaceAllocationTopAddress();
   inline Address* NewSpaceAllocationLimitAddress();
   inline Address* OldSpaceAllocationTopAddress();
   inline Address* OldSpaceAllocationLimitAddress();
 
-  bool CanExpandOldGeneration(int size) {
-    if (force_oom_) return false;
-    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
-  }
-
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
@@ -847,8 +836,6 @@ class Heap {
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
-  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit);
-
   void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
 
   // An object should be promoted if the object has survived a
@@ -862,8 +849,6 @@ class Heap {
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
 
-  inline bool OldGenerationAllocationLimitReached();
-
   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();
@@ -1846,6 +1831,22 @@ class Heap {
   intptr_t MinimumAllocationLimitGrowingStep();
 
+  intptr_t old_generation_allocation_limit() const {
+    return old_generation_allocation_limit_;
+  }
+
+  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
+
+  bool CanExpandOldGeneration(int size) {
+    if (force_oom_) return false;
+    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
+  }
+
+  bool ShouldExpandOldGenerationOnAllocationFailure();
+
+  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
+  IncrementalMarkingLimit IncrementalMarkingLimitReached();
+
   // ===========================================================================
   // Idle notification. ========================================================
   // ===========================================================================
@@ -2322,12 +2323,15 @@ class Heap {
   friend class HeapIterator;
   friend class IdleScavengeObserver;
   friend class IncrementalMarking;
+  friend class IncrementalMarkingJob;
   friend class IteratePromotedObjectsVisitor;
+  friend class LargeObjectSpace;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;
   friend class ObjectStatsCollector;
   friend class Page;
+  friend class PagedSpace;
   friend class Scavenger;
   friend class StoreBuffer;
   friend class TestMemoryAllocatorScope;

src/heap/incremental-marking-job.cc

@@ -45,6 +45,14 @@ void IncrementalMarkingJob::Task::RunInternal() {
   Heap* heap = isolate()->heap();
   job_->NotifyTask();
   IncrementalMarking* incremental_marking = heap->incremental_marking();
+  if (incremental_marking->IsStopped()) {
+    if (heap->IncrementalMarkingLimitReached() !=
+        Heap::IncrementalMarkingLimit::kNoLimit) {
+      heap->StartIncrementalMarking(Heap::kNoGCFlags,
+                                    GarbageCollectionReason::kIdleTask,
+                                    kNoGCCallbackFlags);
+    }
+  }
   if (!incremental_marking->IsStopped()) {
     Step(heap);
     if (!incremental_marking->IsStopped()) {
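
So a task scheduled at the soft limit re-checks the limit when it runs, and starts marking only if some limit is still reached. A hedged sketch of that control flow with stub types, not the real IncrementalMarkingJob API:

// Stand-ins for illustration only; the real types live in src/heap/.
enum class MarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

struct MarkingStub {
  bool stopped = true;
  void Start() { stopped = false; }  // stands in for StartIncrementalMarking
  void Step() {}                     // stands in for one marking step
};

// Shape of Task::RunInternal() after this change: start marking if (still)
// warranted, then make progress if marking is now running.
void RunTask(MarkingStub& marking, MarkingLimit limit_now) {
  if (marking.stopped && limit_now != MarkingLimit::kNoLimit) {
    marking.Start();
  }
  if (!marking.stopped) {
    marking.Step();
  }
}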

src/heap/incremental-marking.cc

@@ -400,22 +400,6 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
   }
 }
 
-bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
-#ifndef DEBUG
-  static const intptr_t kActivationThreshold = 8 * MB;
-#else
-  // TODO(gc) consider setting this to some low level so that some
-  // debug tests run with incremental marking and some without.
-  static const intptr_t kActivationThreshold = 0;
-#endif
-  // Don't switch on for very small heaps.
-  return CanBeActivated() &&
-         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
-         heap_->HeapIsFullEnoughToStartIncrementalMarking(
-             heap_->old_generation_allocation_limit());
-}
-
 bool IncrementalMarking::WasActivated() { return was_activated_; }

src/heap/incremental-marking.h

@@ -72,8 +72,6 @@ class IncrementalMarking {
   bool CanBeActivated();
 
-  bool ShouldActivateEvenWithoutIdleNotification();
-
   bool WasActivated();
 
   void Start(GarbageCollectionReason gc_reason);
@@ -117,6 +115,12 @@ class IncrementalMarking {
   // incremental marking to be postponed.
   static const int kMaxIdleMarkingDelayCounter = 3;
 
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  static const intptr_t kActivationThreshold = 0;
+#endif
+
   void FinalizeSweeping();
 
   size_t Step(size_t bytes_to_process, CompletionAction action,

src/heap/spaces.cc

@@ -2889,19 +2889,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     }
   }
 
-  // Free list allocation failed and there is no next page. Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    // If sweeper threads are active, wait for them at that point and steal
-    // elements form their free-lists.
-    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
-    return object;
-  }
-
   // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
+  if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (size_in_bytes <= free_list_.Available()));
     return free_list_.Allocate(size_in_bytes);
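
Expansion of a full old-generation space is now gated on the new predicate rather than on OldGenerationAllocationLimitReached(). A hedged restatement of that policy as a standalone function; the parameter names are stand-ins for the Heap queries in Heap::ShouldExpandOldGenerationOnAllocationFailure() in the heap.cc hunk above:

#include <cstdint>

// Not V8 code: a simplified sketch of the page-expansion policy.
bool ShouldExpandOnAllocationFailure(bool always_allocate,
                                     std::int64_t old_gen_available,
                                     bool optimize_for_memory,
                                     bool marking_stopped,
                                     bool can_start_marking) {
  // Under the limit (or allocation is forced): adding a page is fine.
  if (always_allocate || old_gen_available > 0) return true;
  // Past the limit and memory-constrained: refuse, forcing a major GC.
  if (optimize_for_memory) return false;
  // Past the limit with no way to make marking progress: also force a GC.
  if (marking_stopped && !can_start_marking) return false;
  // Marking is running (or can start), so expansion buys time to finish it.
  return true;
}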

test/cctest/heap/heap-tester.h

@@ -30,6 +30,7 @@
   V(TestMemoryReducerSampleJsCalls)  \
   V(TestSizeOfObjects)               \
   V(Regress587004)                   \
+  V(Regress538257)                   \
   V(Regress589413)                   \
   V(WriteBarriersInCopyJSObject)

test/cctest/heap/test-heap.cc

@@ -5583,8 +5583,7 @@ static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
   CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, NULL);
 }
 
-
-UNINITIALIZED_TEST(Regress538257) {
+HEAP_TEST(Regress538257) {
   i::FLAG_manual_evacuation_candidates_selection = true;
   v8::Isolate::CreateParams create_params;
   // Set heap limits.
@@ -5609,7 +5608,8 @@ UNINITIALIZED_TEST(Regress538257) {
         ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
   }
   heap::SimulateFullSpace(old_space);
-  heap->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting);
+  heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+                          i::GarbageCollectionReason::kTesting);
   // If we get this far, we've successfully aborted compaction. Any further
   // allocations might trigger OOM.
   }