Reland "[heap] New heuristics for starting of incremental marking. (patchset #9 id:160001 of https://codereview.chromium.org/2364923002/ )"
This reverts commit a5440d1190.
BUG=chromium:616434
TBR=hpayer@chromium.org
LOG=NO
Review-Url: https://codereview.chromium.org/2379663002
Cr-Commit-Position: refs/heads/master@{#39838}
Parent: c98fed4c78
Commit: 7e652694a0
@@ -169,18 +169,6 @@ Address* Heap::OldSpaceAllocationLimitAddress() {
   return old_space_->allocation_limit_address();
 }
 
-bool Heap::HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit) {
-  if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
-  intptr_t adjusted_allocation_limit = limit - new_space_->Capacity();
-
-  if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
-
-  if (HighMemoryPressure()) return true;
-
-  return false;
-}
-
 void Heap::UpdateNewSpaceAllocationCounter() {
   new_space_allocation_counter_ = NewSpaceAllocationCounter();
 }
@@ -492,13 +480,6 @@ bool Heap::InOldSpaceSlow(Address address) {
   return old_space_->ContainsSlow(address);
 }
 
-bool Heap::OldGenerationAllocationLimitReached() {
-  if (!incremental_marking()->IsStopped() && !ShouldOptimizeForMemoryUsage()) {
-    return false;
-  }
-  return OldGenerationSpaceAvailable() < 0;
-}
-
 template <PromotionMode promotion_mode>
 bool Heap::ShouldBePromoted(Address old_address, int object_size) {
   if (promotion_mode == PROMOTE_MARKED) {
@@ -266,13 +266,6 @@ GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
     return MARK_COMPACTOR;
   }
 
-  // Is enough data promoted to justify a global GC?
-  if (OldGenerationAllocationLimitReached()) {
-    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
-    return MARK_COMPACTOR;
-  }
-
   // Is there enough space left in OLD to guarantee that a scavenge can
   // succeed?
   //
@@ -967,7 +960,7 @@ bool Heap::CollectGarbage(GarbageCollector collector,
   if (collector == MARK_COMPACTOR && !ShouldFinalizeIncrementalMarking() &&
       !ShouldAbortIncrementalMarking() && !incremental_marking()->IsStopped() &&
       !incremental_marking()->should_hurry() && FLAG_incremental_marking &&
-      OldGenerationAllocationLimitReached()) {
+      OldGenerationSpaceAvailable() <= 0) {
     if (!incremental_marking()->IsComplete() &&
         !mark_compact_collector()->marking_deque_.IsEmpty() &&
         !FLAG_gc_global) {
@@ -1079,10 +1072,15 @@ void Heap::StartIncrementalMarking(int gc_flags,
 
 void Heap::StartIncrementalMarkingIfAllocationLimitIsReached(
     int gc_flags, const GCCallbackFlags gc_callback_flags) {
-  if (incremental_marking()->IsStopped() &&
-      incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    StartIncrementalMarking(gc_flags, GarbageCollectionReason::kAllocationLimit,
-                            gc_callback_flags);
+  if (incremental_marking()->IsStopped()) {
+    IncrementalMarkingLimit reached_limit = IncrementalMarkingLimitReached();
+    if (reached_limit == IncrementalMarkingLimit::kSoftLimit) {
+      incremental_marking()->incremental_marking_job()->ScheduleTask(this);
+    } else if (reached_limit == IncrementalMarkingLimit::kHardLimit) {
+      StartIncrementalMarking(gc_flags,
+                              GarbageCollectionReason::kAllocationLimit,
+                              gc_callback_flags);
+    }
   }
 }
 
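The trigger above is now two-staged: reaching the soft limit only schedules an incremental-marking job task, and that task (see the IncrementalMarkingJob::Task::RunInternal() hunk further down) re-checks the limit before actually starting marking. A minimal standalone sketch of the flow, using invented stand-in types (FakeHeap, OnAllocation, RunJobTask) rather than the real V8 classes:

#include <iostream>

// Stand-in for the patch's three-way limit; the enumerator names mirror the
// diff, everything else in this sketch is simplified for illustration.
enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };

struct FakeHeap {
  IncrementalMarkingLimit LimitReached() const { return limit; }
  void StartIncrementalMarking(const char* reason) {
    std::cout << "start marking: " << reason << "\n";
  }
  IncrementalMarkingLimit limit = IncrementalMarkingLimit::kSoftLimit;
};

// Allocation path: the hard limit starts marking now, the soft limit defers.
void OnAllocation(FakeHeap& heap, bool& task_scheduled) {
  switch (heap.LimitReached()) {
    case IncrementalMarkingLimit::kHardLimit:
      heap.StartIncrementalMarking("allocation limit");
      break;
    case IncrementalMarkingLimit::kSoftLimit:
      task_scheduled = true;  // ScheduleTask() in the real code.
      break;
    case IncrementalMarkingLimit::kNoLimit:
      break;  // Plenty of headroom; do nothing.
  }
}

// Job task: re-checks the limit, so a stale task does not force marking.
void RunJobTask(FakeHeap& heap) {
  if (heap.LimitReached() != IncrementalMarkingLimit::kNoLimit) {
    heap.StartIncrementalMarking("idle task");
  }
}

int main() {
  FakeHeap heap;
  bool task_scheduled = false;
  OnAllocation(heap, task_scheduled);    // Soft limit: only schedules.
  if (task_scheduled) RunJobTask(heap);  // Task starts marking later.
}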
@@ -5329,7 +5327,6 @@ void Heap::SetOldGenerationAllocationLimit(intptr_t old_gen_size,
   }
 }
 
-
 void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
                                               double gc_speed,
                                               double mutator_speed) {
@@ -5348,6 +5345,53 @@ void Heap::DampenOldGenerationAllocationLimit(intptr_t old_gen_size,
   }
 }
 
+// This predicate is called when an old generation space cannot allocated from
+// the free list and is about to add a new page. Returning false will cause a
+// major GC. It happens when the old generation allocation limit is reached and
+// - either we need to optimize for memory usage,
+// - or the incremental marking is not in progress and we cannot start it.
+bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
+  if (always_allocate() || OldGenerationSpaceAvailable() > 0) return true;
+  // We reached the old generation allocation limit.
+
+  if (ShouldOptimizeForMemoryUsage()) return false;
+
+  if (incremental_marking()->IsStopped() &&
+      IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
+    // We cannot start incremental marking.
+    return false;
+  }
+  return true;
+}
+
+// This function returns either kNoLimit, kSoftLimit, or kHardLimit.
+// The kNoLimit means that either incremental marking is disabled or it is too
+// early to start incremental marking.
+// The kSoftLimit means that incremental marking should be started soon.
+// The kHardLimit means that incremental marking should be started immediately.
+Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
+  if (!incremental_marking()->CanBeActivated() ||
+      PromotedSpaceSizeOfObjects() < IncrementalMarking::kActivationThreshold) {
+    // Incremental marking is disabled or it is too early to start.
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  if ((FLAG_stress_compaction && (gc_count_ & 1) != 0) ||
+      HighMemoryPressure()) {
+    // If there is high memory pressure or stress testing is enabled, then
+    // start marking immediately.
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  intptr_t old_generation_space_available = OldGenerationSpaceAvailable();
+  if (old_generation_space_available > new_space_->Capacity()) {
+    return IncrementalMarkingLimit::kNoLimit;
+  }
+  // We are close to the allocation limit.
+  // Choose between the hard and the soft limits.
+  if (old_generation_space_available <= 0 || ShouldOptimizeForMemoryUsage()) {
+    return IncrementalMarkingLimit::kHardLimit;
+  }
+  return IncrementalMarkingLimit::kSoftLimit;
+}
+
 void Heap::EnableInlineAllocation() {
   if (!inline_allocation_disabled_) return;
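To make the new thresholds concrete: the limit function keys off how much old-generation headroom remains relative to new-space capacity. Below is a simplified standalone model with invented example sizes; the real Heap::IncrementalMarkingLimitReached() additionally checks CanBeActivated(), the kActivationThreshold minimum heap size, FLAG_stress_compaction, HighMemoryPressure(), and ShouldOptimizeForMemoryUsage():

#include <cstdint>
#include <cstdio>

enum class Limit { kNoLimit, kSoftLimit, kHardLimit };

// Core comparison from the patch: the soft window opens once less than one
// new-space capacity of old-generation headroom remains; the hard limit
// fires when the headroom is gone.
Limit LimitReached(int64_t allocation_limit, int64_t promoted_size,
                   int64_t new_space_capacity) {
  int64_t available = allocation_limit - promoted_size;
  if (available > new_space_capacity) return Limit::kNoLimit;
  if (available <= 0) return Limit::kHardLimit;
  return Limit::kSoftLimit;
}

int main() {
  const int64_t MB = int64_t{1} << 20;
  // Example: 600 MB limit, 590 MB promoted, 16 MB new space.
  // Headroom is 10 MB, inside the 16 MB soft window -> kSoftLimit (1).
  std::printf("%d\n",
              static_cast<int>(LimitReached(600 * MB, 590 * MB, 16 * MB)));
  // Headroom exhausted -> kHardLimit (2).
  std::printf("%d\n",
              static_cast<int>(LimitReached(600 * MB, 605 * MB, 16 * MB)));
}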
@@ -709,22 +709,11 @@ class Heap {
   // should not happen during deserialization.
   void NotifyDeserializationComplete();
 
-  intptr_t old_generation_allocation_limit() const {
-    return old_generation_allocation_limit_;
-  }
-
-  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
-
   inline Address* NewSpaceAllocationTopAddress();
   inline Address* NewSpaceAllocationLimitAddress();
   inline Address* OldSpaceAllocationTopAddress();
   inline Address* OldSpaceAllocationLimitAddress();
 
-  bool CanExpandOldGeneration(int size) {
-    if (force_oom_) return false;
-    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
-  }
-
   // Clear the Instanceof cache (used when a prototype changes).
   inline void ClearInstanceofCache();
 
@@ -847,8 +836,6 @@ class Heap {
   // Check new space expansion criteria and expand semispaces if it was hit.
   void CheckNewSpaceExpansionCriteria();
 
-  inline bool HeapIsFullEnoughToStartIncrementalMarking(intptr_t limit);
-
   void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
 
   // An object should be promoted if the object has survived a
@@ -862,8 +849,6 @@ class Heap {
 
   void IncrementDeferredCount(v8::Isolate::UseCounterFeature feature);
 
-  inline bool OldGenerationAllocationLimitReached();
-
   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();
@@ -1846,6 +1831,22 @@ class Heap {
 
   intptr_t MinimumAllocationLimitGrowingStep();
 
+  intptr_t old_generation_allocation_limit() const {
+    return old_generation_allocation_limit_;
+  }
+
+  bool always_allocate() { return always_allocate_scope_count_.Value() != 0; }
+
+  bool CanExpandOldGeneration(int size) {
+    if (force_oom_) return false;
+    return (OldGenerationCapacity() + size) < MaxOldGenerationSize();
+  }
+
+  bool ShouldExpandOldGenerationOnAllocationFailure();
+
+  enum class IncrementalMarkingLimit { kNoLimit, kSoftLimit, kHardLimit };
+  IncrementalMarkingLimit IncrementalMarkingLimitReached();
+
   // ===========================================================================
   // Idle notification. ========================================================
   // ===========================================================================
@@ -2322,12 +2323,15 @@ class Heap {
   friend class HeapIterator;
   friend class IdleScavengeObserver;
   friend class IncrementalMarking;
+  friend class IncrementalMarkingJob;
   friend class IteratePromotedObjectsVisitor;
+  friend class LargeObjectSpace;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;
   friend class ObjectStatsCollector;
   friend class Page;
+  friend class PagedSpace;
   friend class Scavenger;
   friend class StoreBuffer;
   friend class TestMemoryAllocatorScope;
@@ -45,6 +45,14 @@ void IncrementalMarkingJob::Task::RunInternal() {
   Heap* heap = isolate()->heap();
   job_->NotifyTask();
   IncrementalMarking* incremental_marking = heap->incremental_marking();
+  if (incremental_marking->IsStopped()) {
+    if (heap->IncrementalMarkingLimitReached() !=
+        Heap::IncrementalMarkingLimit::kNoLimit) {
+      heap->StartIncrementalMarking(Heap::kNoGCFlags,
+                                    GarbageCollectionReason::kIdleTask,
+                                    kNoGCCallbackFlags);
+    }
+  }
   if (!incremental_marking->IsStopped()) {
     Step(heap);
     if (!incremental_marking->IsStopped()) {
@@ -400,22 +400,6 @@ void IncrementalMarking::ActivateIncrementalWriteBarrier() {
 }
 
 
-bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
-#ifndef DEBUG
-  static const intptr_t kActivationThreshold = 8 * MB;
-#else
-  // TODO(gc) consider setting this to some low level so that some
-  // debug tests run with incremental marking and some without.
-  static const intptr_t kActivationThreshold = 0;
-#endif
-  // Don't switch on for very small heaps.
-  return CanBeActivated() &&
-         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
-         heap_->HeapIsFullEnoughToStartIncrementalMarking(
-             heap_->old_generation_allocation_limit());
-}
-
-
 bool IncrementalMarking::WasActivated() { return was_activated_; }
 
 
@@ -72,8 +72,6 @@ class IncrementalMarking {
 
   bool CanBeActivated();
 
-  bool ShouldActivateEvenWithoutIdleNotification();
-
   bool WasActivated();
 
   void Start(GarbageCollectionReason gc_reason);
@@ -117,6 +115,12 @@ class IncrementalMarking {
   // incremental marking to be postponed.
   static const int kMaxIdleMarkingDelayCounter = 3;
 
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  static const intptr_t kActivationThreshold = 0;
+#endif
+
   void FinalizeSweeping();
 
   size_t Step(size_t bytes_to_process, CompletionAction action,
@@ -2889,19 +2889,7 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     }
   }
 
-  // Free list allocation failed and there is no next page. Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    // If sweeper threads are active, wait for them at that point and steal
-    // elements form their free-lists.
-    HeapObject* object = SweepAndRetryAllocation(size_in_bytes);
-    return object;
-  }
-
-  // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
+  if (heap()->ShouldExpandOldGenerationOnAllocationFailure() && Expand()) {
     DCHECK((CountTotalPages() > 1) ||
            (size_in_bytes <= free_list_.Available()));
     return free_list_.Allocate(size_in_bytes);
@@ -30,6 +30,7 @@
   V(TestMemoryReducerSampleJsCalls) \
   V(TestSizeOfObjects) \
   V(Regress587004) \
+  V(Regress538257) \
   V(Regress589413) \
   V(WriteBarriersInCopyJSObject)
 
@@ -5583,8 +5583,7 @@ static void RequestInterrupt(const v8::FunctionCallbackInfo<v8::Value>& args) {
   CcTest::isolate()->RequestInterrupt(&InterruptCallback357137, NULL);
 }
 
-
-UNINITIALIZED_TEST(Regress538257) {
+HEAP_TEST(Regress538257) {
   i::FLAG_manual_evacuation_candidates_selection = true;
   v8::Isolate::CreateParams create_params;
   // Set heap limits.
@@ -5609,7 +5608,8 @@ UNINITIALIZED_TEST(Regress538257) {
         ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
   }
   heap::SimulateFullSpace(old_space);
-  heap->CollectGarbage(OLD_SPACE, i::GarbageCollectionReason::kTesting);
+  heap->CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask,
+                          i::GarbageCollectionReason::kTesting);
   // If we get this far, we've successfully aborted compaction. Any further
   // allocations might trigger OOM.
 }