[heap] Use RAIL mode for initial heap sizing

BUG=chromium:613518

Review-Url: https://codereview.chromium.org/2407153002
Cr-Commit-Position: refs/heads/master@{#41459}
Author: ulan, 2016-12-02 08:33:45 -08:00 (committed by Commit bot)
Parent: e7a51fff24
Commit: aea4f1a704
8 changed files with 74 additions and 49 deletions


@ -80,9 +80,6 @@ Heap::Heap()
max_semi_space_size_(8 * (kPointerSize / 4) * MB),
initial_semispace_size_(MB),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
- initial_old_generation_size_(max_old_generation_size_ /
- kInitalOldGenerationLimitFactor),
- old_generation_size_configured_(false),
max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
@ -111,7 +108,7 @@ Heap::Heap()
#ifdef DEBUG
allocation_timeout_(0),
#endif // DEBUG
- old_generation_allocation_limit_(initial_old_generation_size_),
+ old_generation_allocation_limit_(0),
inline_allocation_disabled_(false),
total_regexp_code_generated_(0),
tracer_(nullptr),
@ -1049,7 +1046,6 @@ bool Heap::CollectGarbage(GarbageCollector collector,
int Heap::NotifyContextDisposed(bool dependant_context) {
if (!dependant_context) {
tracer()->ResetSurvivalEvents();
- old_generation_size_configured_ = false;
MemoryReducer::Event event;
event.type = MemoryReducer::kPossibleGarbage;
event.time_ms = MonotonicallyIncreasingTimeInMs();
@ -1313,7 +1309,6 @@ bool Heap::PerformGarbageCollection(
UpdateOldGenerationAllocationCounter();
// Perform mark-sweep with optional compaction.
MarkCompact();
- old_generation_size_configured_ = true;
// This should be updated before PostGarbageCollectionProcessing, which
// can cause another GC. Take into account the objects promoted during
// GC.
@ -1333,7 +1328,6 @@ bool Heap::PerformGarbageCollection(
}
UpdateSurvivalStatistics(start_new_space_size);
- ConfigureInitialOldGenerationSize();
isolate_->counters()->objs_since_last_young()->Set(0);
@ -1361,8 +1355,7 @@ bool Heap::PerformGarbageCollection(
external_memory_at_last_mark_compact_ = external_memory_;
external_memory_limit_ = external_memory_ + kExternalAllocationSoftLimit;
SetOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
- } else if (HasLowYoungGenerationAllocationRate() &&
- old_generation_size_configured_) {
+ } else if (HasLowYoungGenerationAllocationRate()) {
DampenOldGenerationAllocationLimit(old_gen_size, gc_speed, mutator_speed);
}
@ -1992,17 +1985,6 @@ void Heap::UnregisterArrayBuffer(JSArrayBuffer* buffer) {
}
- void Heap::ConfigureInitialOldGenerationSize() {
- if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
- old_generation_allocation_limit_ =
- Max(MinimumAllocationLimitGrowingStep(),
- static_cast<size_t>(
- static_cast<double>(old_generation_allocation_limit_) *
- (tracer()->AverageSurvivalRatio() / 100)));
- }
- }
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
@ -4257,7 +4239,8 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
incremental_marking()->AdvanceIncrementalMarking(
deadline_in_ms, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
IncrementalMarking::FORCE_COMPLETION, StepOrigin::kTask);
- if (remaining_idle_time_in_ms > 0.0) {
+ if (remaining_idle_time_in_ms > 0.0 &&
+ incremental_marking()->IsMarking()) {
TryFinalizeIdleIncrementalMarking(
remaining_idle_time_in_ms,
GarbageCollectionReason::kFinalizeMarkingViaTask);
@ -5049,14 +5032,6 @@ bool Heap::ConfigureHeap(size_t max_semi_space_size, size_t max_old_space_size,
max_executable_size_ = max_old_generation_size_;
}
- if (FLAG_initial_old_space_size > 0) {
- initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
- } else {
- initial_old_generation_size_ =
- max_old_generation_size_ / kInitalOldGenerationLimitFactor;
- }
- old_generation_allocation_limit_ = initial_old_generation_size_;
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(kMaxRegularHeapObjectSize >=
(JSArray::kSize +
@ -5294,6 +5269,25 @@ void Heap::DampenOldGenerationAllocationLimit(size_t old_gen_size,
}
}
+ size_t Heap::OldGenerationSpaceAvailable() {
+ if (old_generation_allocation_limit_ == 0) {
+ // Lazy initialization of allocation limit.
+ old_generation_allocation_limit_ = CalculateOldGenerationAllocationLimit(
+ kConservativeHeapGrowingFactor, PromotedSpaceSizeOfObjects());
+ }
+ if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
+ return old_generation_allocation_limit_ -
+ static_cast<size_t>(PromotedTotalSize());
+ }
+ bool Heap::ShouldOptimizeForLoadTime() {
+ return isolate()->rail_mode() == PERFORMANCE_LOAD &&
+ PromotedTotalSize() <
+ max_old_generation_size_ / kInitalOldGenerationLimitFactor &&
+ MonotonicallyIncreasingTimeInMs() <
+ isolate()->LoadStartTimeMs() + kMaxLoadTimeMs;
+ }
// This predicate is called when an old generation space cannot allocated from
// the free list and is about to add a new page. Returning false will cause a
// major GC. It happens when the old generation allocation limit is reached and
@ -5305,6 +5299,8 @@ bool Heap::ShouldExpandOldGenerationOnAllocationFailure() {
if (ShouldOptimizeForMemoryUsage()) return false;
+ if (ShouldOptimizeForLoadTime()) return true;
if (incremental_marking()->IsStopped() &&
IncrementalMarkingLimitReached() == IncrementalMarkingLimit::kNoLimit) {
// We cannot start incremental marking.
@ -5335,6 +5331,9 @@ Heap::IncrementalMarkingLimit Heap::IncrementalMarkingLimitReached() {
if (old_generation_space_available > new_space_->Capacity()) {
return IncrementalMarkingLimit::kNoLimit;
}
+ if (ShouldOptimizeForLoadTime()) return IncrementalMarkingLimit::kNoLimit;
// We are close to the allocation limit.
// Choose between the hard and the soft limits.
if (old_generation_space_available == 0 || ShouldOptimizeForMemoryUsage()) {
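
Taken together, the heap.cc changes stop seeding old_generation_allocation_limit_ from initial_old_generation_size_ (it now starts at 0 and is filled in lazily), and they add a load-time escape hatch consulted by ShouldExpandOldGenerationOnAllocationFailure() and IncrementalMarkingLimitReached(). Below is a standalone sketch of the resulting decision logic; the struct, the calculate_limit callback, and the caller-supplied clock are invented for illustration, while the constants and the shape of the predicates mirror the hunks above.

// Standalone model of the new sizing policy (not V8 code).
#include <cstddef>

struct HeapSizingModel {
  static const size_t kInitalOldGenerationLimitFactor = 2;  // spelling as in V8
  static const int kMaxLoadTimeMs = 3000;

  size_t old_generation_allocation_limit = 0;  // 0 == not configured yet
  size_t max_old_generation_size = 0;
  bool in_performance_load = false;  // RAIL mode == PERFORMANCE_LOAD
  double load_start_time_ms = 0;

  // Lazy initialization: the first query derives the limit from the live
  // promoted size instead of a precomputed initial_old_generation_size_.
  size_t OldGenerationSpaceAvailable(size_t promoted_total,
                                     size_t (*calculate_limit)(size_t)) {
    if (old_generation_allocation_limit == 0)
      old_generation_allocation_limit = calculate_limit(promoted_total);
    if (old_generation_allocation_limit <= promoted_total) return 0;
    return old_generation_allocation_limit - promoted_total;
  }

  // While a page load is in progress, the heap is still small, and the load
  // has not run longer than kMaxLoadTimeMs, prefer growing over collecting.
  bool ShouldOptimizeForLoadTime(size_t promoted_total, double now_ms) const {
    return in_performance_load &&
           promoted_total <
               max_old_generation_size / kInitalOldGenerationLimitFactor &&
           now_ms < load_start_time_ms + kMaxLoadTimeMs;
  }
};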


@ -581,8 +581,6 @@ class Heap {
};
typedef List<Chunk> Reservation;
- static const int kInitalOldGenerationLimitFactor = 2;
#if V8_OS_ANDROID
// Don't apply pointer multiplier on Android since it has no swap space and
// should instead adapt it's heap size based on available physical memory.
@ -1721,8 +1719,6 @@ class Heap {
// Flush the number to string cache.
void FlushNumberStringCache();
- void ConfigureInitialOldGenerationSize();
bool HasLowYoungGenerationAllocationRate();
bool HasLowOldGenerationAllocationRate();
double YoungGenerationMutatorUtilization();
@ -1813,11 +1809,7 @@ class Heap {
// GC statistics. ============================================================
// ===========================================================================
- inline size_t OldGenerationSpaceAvailable() {
- if (old_generation_allocation_limit_ <= PromotedTotalSize()) return 0;
- return old_generation_allocation_limit_ -
- static_cast<size_t>(PromotedTotalSize());
- }
+ size_t OldGenerationSpaceAvailable();
void UpdateTotalGCTime(double duration);
@ -1827,6 +1819,16 @@ class Heap {
// Growing strategy. =========================================================
// ===========================================================================
+ static const int kInitalOldGenerationLimitFactor = 2;
+ // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
+ // This constant limits the effect of load RAIL mode on GC.
+ // The value is arbitrary and chosen as the largest load time observed in
+ // v8 browsing benchmarks.
+ static const int kMaxLoadTimeMs = 3000;
+ bool ShouldOptimizeForLoadTime();
// Decrease the allocation limit if the new limit based on the given
// parameters is lower than the current limit.
void DampenOldGenerationAllocationLimit(size_t old_gen_size, double gc_speed,
@ -2116,7 +2118,6 @@ class Heap {
size_t initial_semispace_size_;
size_t max_old_generation_size_;
size_t initial_old_generation_size_;
- bool old_generation_size_configured_;
size_t max_executable_size_;
size_t maximum_committed_;
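
To put kMaxLoadTimeMs and kInitalOldGenerationLimitFactor in concrete terms, here is a rough back-of-the-envelope using the constructor defaults visible in the first heap.cc hunk, assuming a 64-bit build (kPointerSize == 8) and no platform multiplier:

// Rough numbers only; real values depend on build flags and the platform
// multipliers mentioned in the Android comment above.
#include <cstddef>

constexpr size_t MB = 1024 * 1024;
constexpr size_t kPointerSize = 8;                                     // 64-bit
constexpr size_t kMaxOldGeneration = 700ul * (kPointerSize / 4) * MB;  // 1400 MB
constexpr size_t kLoadModeSizeCap = kMaxOldGeneration / 2;             // 700 MB
// ShouldOptimizeForLoadTime() therefore stops suppressing GC once roughly
// 700 MB have been promoted, or once 3000 ms have passed since the load
// started, whichever comes first.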


@ -19,12 +19,11 @@ void IncrementalMarkingJob::Start(Heap* heap) {
ScheduleTask(heap);
}
- void IncrementalMarkingJob::NotifyTask() { task_pending_ = false; }
+ void IncrementalMarkingJob::NotifyTask() { task_pending_.SetValue(false); }
void IncrementalMarkingJob::ScheduleTask(Heap* heap) {
- if (!task_pending_) {
+ if (task_pending_.TrySetValue(false, true)) {
v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap->isolate());
- task_pending_ = true;
auto task = new Task(heap->isolate(), this);
V8::GetCurrentPlatform()->CallOnForegroundThread(isolate, task);
}
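
The job now goes through base::AtomicValue<bool>::TrySetValue(false, true), so only the caller that flips the flag from false to true actually posts a task, and NotifyTask() re-arms it. A minimal standalone equivalent using std::atomic (PostTask stands in for the CallOnForegroundThread call in the diff):

#include <atomic>

std::atomic<bool> task_pending{false};

void PostTask();  // stand-in for posting the foreground task

// Schedule at most one task at a time: compare_exchange_strong plays the
// role of TrySetValue(false, true) in the diff.
void ScheduleTask() {
  bool expected = false;
  if (task_pending.compare_exchange_strong(expected, true)) {
    PostTask();
  }
}

// Called by the task when it runs, re-arming the scheduler.
void NotifyTask() { task_pending.store(false); }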


@ -5,6 +5,7 @@
#ifndef V8_HEAP_INCREMENTAL_MARKING_JOB_H_
#define V8_HEAP_INCREMENTAL_MARKING_JOB_H_
#include "src/base/atomic-utils.h"
#include "src/cancelable-task.h"
namespace v8 {
@ -32,8 +33,6 @@ class IncrementalMarkingJob {
IncrementalMarkingJob() : task_pending_(false) {}
- bool TaskPending() { return task_pending_; }
void Start(Heap* heap);
void NotifyTask();
@ -41,7 +40,7 @@ class IncrementalMarkingJob {
void ScheduleTask(Heap* heap);
private:
- bool task_pending_;
+ base::AtomicValue<bool> task_pending_;
};
} // namespace internal
} // namespace v8


@ -2112,6 +2112,7 @@ Isolate::Isolate(bool enable_serializer)
// be fixed once the default isolate cleanup is done.
random_number_generator_(NULL),
rail_mode_(PERFORMANCE_ANIMATION),
+ load_start_time_ms_(0),
serializer_enabled_(enable_serializer),
has_fatal_error_(false),
initialized_from_snapshot_(false),
@ -3441,8 +3442,22 @@ void Isolate::CheckDetachedContextsAfterGC() {
}
}
+ double Isolate::LoadStartTimeMs() {
+ base::LockGuard<base::Mutex> guard(&rail_mutex_);
+ return load_start_time_ms_;
+ }
void Isolate::SetRAILMode(RAILMode rail_mode) {
+ RAILMode old_rail_mode = rail_mode_.Value();
+ if (old_rail_mode != PERFORMANCE_LOAD && rail_mode == PERFORMANCE_LOAD) {
+ base::LockGuard<base::Mutex> guard(&rail_mutex_);
+ load_start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+ }
rail_mode_.SetValue(rail_mode);
+ if (old_rail_mode == PERFORMANCE_LOAD && rail_mode != PERFORMANCE_LOAD) {
+ heap()->incremental_marking()->incremental_marking_job()->ScheduleTask(
+ heap());
+ }
if (FLAG_trace_rail) {
PrintIsolate(this, "RAIL mode: %s\n", RAILModeName(rail_mode));
}
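
The isolate now records when PERFORMANCE_LOAD begins and kicks an incremental-marking task when it ends; the mutex around load_start_time_ms_ suggests the setter and the reader (the heap, via LoadStartTimeMs()) are not assumed to run on the same thread. A standalone model of the edge-triggered bookkeeping, with std::mutex and std::atomic in place of V8's base primitives and the Isolate plumbing omitted:

#include <atomic>
#include <mutex>

enum RAILMode { PERFORMANCE_RESPONSE, PERFORMANCE_ANIMATION,
                PERFORMANCE_IDLE, PERFORMANCE_LOAD };

class RailState {
 public:
  // Returns true when we just left PERFORMANCE_LOAD, the moment at which the
  // diff schedules an incremental-marking task.
  bool SetMode(RAILMode mode, double now_ms) {
    RAILMode old_mode = mode_.load();
    if (old_mode != PERFORMANCE_LOAD && mode == PERFORMANCE_LOAD) {
      std::lock_guard<std::mutex> guard(mutex_);
      load_start_time_ms_ = now_ms;  // record when the load began
    }
    mode_.store(mode);
    return old_mode == PERFORMANCE_LOAD && mode != PERFORMANCE_LOAD;
  }

  double LoadStartTimeMs() {
    std::lock_guard<std::mutex> guard(mutex_);
    return load_start_time_ms_;
  }

 private:
  std::atomic<RAILMode> mode_{PERFORMANCE_ANIMATION};
  std::mutex mutex_;
  double load_start_time_ms_ = 0;
};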


@ -1166,6 +1166,10 @@ class Isolate {
void SetRAILMode(RAILMode rail_mode);
RAILMode rail_mode() { return rail_mode_.Value(); }
+ double LoadStartTimeMs();
void IsolateInForegroundNotification();
void IsolateInBackgroundNotification();
@ -1354,6 +1358,8 @@ class Isolate {
AccessCompilerData* access_compiler_data_;
base::RandomNumberGenerator* random_number_generator_;
base::AtomicValue<RAILMode> rail_mode_;
+ base::Mutex rail_mutex_;
+ double load_start_time_ms_;
// Whether the isolate has been created for snapshotting.
bool serializer_enabled_;


@ -6600,7 +6600,7 @@ TEST(Regress598319) {
CHECK_NOT_NULL(page);
// GC to cleanup state
- CcTest::CollectGarbage(OLD_SPACE);
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->sweeping_in_progress()) {
collector->EnsureSweepingCompleted();


@ -18172,9 +18172,15 @@ TEST(RecursionWithSourceURLInMessageScriptResourceNameOrSourceURL) {
static void CreateGarbageInOldSpace() {
i::Factory* factory = CcTest::i_isolate()->factory();
v8::HandleScope scope(CcTest::isolate());
- i::AlwaysAllocateScope always_allocate(CcTest::i_isolate());
- for (int i = 0; i < 1000; i++) {
- factory->NewFixedArray(1000, i::TENURED);
+ {
+ i::AlwaysAllocateScope always_allocate(CcTest::i_isolate());
+ for (int i = 0; i < 1000; i++) {
+ factory->NewFixedArray(1000, i::TENURED);
+ }
+ }
+ CcTest::CollectAllGarbage(i::Heap::kFinalizeIncrementalMarkingMask);
+ if (CcTest::heap()->mark_compact_collector()->sweeping_in_progress()) {
+ CcTest::heap()->mark_compact_collector()->EnsureSweepingCompleted();
+ }
}
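
For context, the heuristic is driven from the embedder side through the public RAIL API; a minimal usage sketch (the OnLoad* hooks and the choice of PERFORMANCE_ANIMATION as the post-load mode are illustrative, not part of this patch):

#include "v8.h"

// Hypothetical embedder hooks: signal the start and end of a page load so the
// heap can apply the load-time sizing heuristic added in this patch.
void OnLoadStarted(v8::Isolate* isolate) {
  isolate->SetRAILMode(v8::PERFORMANCE_LOAD);       // heap favors growth
}

void OnLoadFinished(v8::Isolate* isolate) {
  isolate->SetRAILMode(v8::PERFORMANCE_ANIMATION);  // back to normal limits
}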