Reland Shrink initial old generation size based on new space survival rate.

This time I picked a more conservative start value: half of the max old generation size.

BUG=

Review URL: https://codereview.chromium.org/804323004

Cr-Commit-Position: refs/heads/master@{#25890}
hpayer 2014-12-18 08:55:46 -08:00 committed by Commit bot
parent 3ff951943f
commit e89318138c
5 changed files with 103 additions and 5 deletions
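For orientation, a minimal standalone sketch of the sizing heuristic this change introduces (all concrete numbers are illustrative; the real constants live in heap.h and the survival rate comes from the GC tracer): the old generation allocation limit starts at half the maximum old generation size and, until the first mark-compact configures it properly, is scaled by the average new-space survival rate, clamped to the minimum allocation limit.

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative 64-bit values: max old generation is 700 * (kPointerSize / 4) MB
  // = 1400 MB (see Heap::Heap() below), so the initial old generation size is 700 MB.
  const int64_t MB = 1024 * 1024;
  const int64_t max_old_generation_size = 1400 * MB;
  const int64_t initial_old_generation_size = max_old_generation_size / 2;
  // Assumed placeholder for kMinimumOldGenerationAllocationLimit.
  const int64_t minimum_allocation_limit = 8 * MB;

  // Example average survival rate (in percent) as reported by the GC tracer.
  const double average_survival_rate = 10.0;

  const int64_t limit = std::max(
      minimum_allocation_limit,
      static_cast<int64_t>(initial_old_generation_size *
                           (average_survival_rate / 100)));
  std::printf("initial allocation limit: %lld MB\n",
              static_cast<long long>(limit / MB));  // 70 MB in this example
  return 0;
}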

src/flag-definitions.h

@@ -557,6 +557,7 @@ DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
"Grow the new space based on the percentage of survivors instead "
"of their absolute value.")
DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
DEFINE_BOOL(gc_global, false, "always perform global GCs")
DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
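The new flag mirrors the existing max_old_space_size flag; an illustrative d8 invocation (script name hypothetical) would be `d8 --initial-old-space-size=64 app.js`. With the default of 0 the initial size is instead derived from half of max_old_generation_size_ and the survival-rate heuristic below.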

src/heap/gc-tracer.cc

@@ -31,6 +31,11 @@ GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
}
GCTracer::SurvivalEvent::SurvivalEvent(double survival_rate) {
survival_rate_ = survival_rate;
}
GCTracer::Event::Event(Type type, const char* gc_reason,
const char* collector_reason)
: type(type),
@@ -252,6 +257,11 @@ void GCTracer::AddContextDisposalTime(double time) {
}
void GCTracer::AddSurvivalRate(double survival_rate) {
survival_events_.push_front(SurvivalEvent(survival_rate));
}
void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
cumulative_incremental_marking_steps_++;
cumulative_incremental_marking_bytes_ += bytes;
@@ -361,8 +371,10 @@ void GCTracer::PrintNVP() const {
PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
PrintF("average_survival_rate%.1f%% ", AverageSurvivalRate());
PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
NewSpaceAllocationThroughputInBytesPerMillisecond());
PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
@@ -556,5 +568,24 @@ double GCTracer::ContextDisposalRateInMilliseconds() const {
return (begin - end) / context_disposal_events_.size();
}
double GCTracer::AverageSurvivalRate() const {
if (survival_events_.size() == 0) return 0.0;
double sum_of_rates = 0.0;
SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
while (iter != survival_events_.end()) {
sum_of_rates += iter->survival_rate_;
++iter;
}
return sum_of_rates / static_cast<double>(survival_events_.size());
}
bool GCTracer::SurvivalEventsRecorded() const {
return survival_events_.size() > 0;
}
}
} // namespace v8::internal
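The new SurvivalEventBuffer is just another fixed-size ring buffer of recent scavenge survival rates; below is a minimal sketch of the averaging it enables, assuming a plain std::deque capped at an assumed capacity in place of V8's RingBuffer.

#include <cstdio>
#include <deque>

static const size_t kBufferCapacity = 10;  // assumed capacity for this sketch

static std::deque<double> survival_events;

void AddSurvivalRate(double survival_rate) {
  // Newest event first; drop the oldest once the buffer is full.
  survival_events.push_front(survival_rate);
  if (survival_events.size() > kBufferCapacity) survival_events.pop_back();
}

double AverageSurvivalRate() {
  if (survival_events.empty()) return 0.0;
  double sum_of_rates = 0.0;
  for (double rate : survival_events) sum_of_rates += rate;
  return sum_of_rates / static_cast<double>(survival_events.size());
}

int main() {
  AddSurvivalRate(80.0);
  AddSurvivalRate(60.0);
  AddSurvivalRate(40.0);
  std::printf("average survival rate: %.1f%%\n", AverageSurvivalRate());  // 60.0%
  return 0;
}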

src/heap/gc-tracer.h

@@ -159,6 +159,17 @@ class GCTracer {
};
class SurvivalEvent {
public:
// Default constructor leaves the event uninitialized.
SurvivalEvent() {}
explicit SurvivalEvent(double survival_rate);
double survival_rate_;
};
class Event {
public:
enum Type {
@@ -267,6 +278,8 @@ class GCTracer {
typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
ContextDisposalEventBuffer;
typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
explicit GCTracer(Heap* heap);
// Start collecting data.
@@ -281,6 +294,8 @@ class GCTracer {
void AddContextDisposalTime(double time);
void AddSurvivalRate(double survival_rate);
// Log an incremental marking step.
void AddIncrementalMarkingStep(double duration, intptr_t bytes);
@@ -367,6 +382,14 @@ class GCTracer {
// Returns 0 if no events have been recorded.
double ContextDisposalRateInMilliseconds() const;
// Computes the average survival rate based on the last recorded survival
// events.
// Returns 0 if no events have been recorded.
double AverageSurvivalRate() const;
// Returns true if at least one survival event was recorded.
bool SurvivalEventsRecorded() const;
private:
// Print one detailed trace line in name=value format.
// TODO(ernstm): Move to Heap.
@@ -417,8 +440,12 @@ class GCTracer {
// RingBuffer for allocation events.
AllocationEventBuffer allocation_events_;
// RingBuffer for context disposal events.
ContextDisposalEventBuffer context_disposal_events_;
// RingBuffer for survival events.
SurvivalEventBuffer survival_events_;
// Cumulative number of incremental marking steps since creation of tracer.
int cumulative_incremental_marking_steps_;

src/heap/heap.cc

@@ -63,6 +63,8 @@ Heap::Heap()
initial_semispace_size_(Page::kPageSize),
target_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
initial_old_generation_size_(max_old_generation_size_ / 2),
old_generation_size_configured_(false),
max_executable_size_(256ul * (kPointerSize / 4) * MB),
// Variables set based on semispace_size_ and old_generation_size_ in
// ConfigureHeap.
@@ -97,7 +99,7 @@ Heap::Heap()
#ifdef DEBUG
allocation_timeout_(0),
#endif // DEBUG
-old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
+old_generation_allocation_limit_(initial_old_generation_size_),
old_gen_exhausted_(false),
inline_allocation_disabled_(false),
store_buffer_rebuilder_(store_buffer()),
@@ -107,8 +109,9 @@ Heap::Heap()
tracer_(this),
high_survival_rate_period_length_(0),
promoted_objects_size_(0),
promotion_rate_(0),
promotion_ratio_(0),
semi_space_copied_object_size_(0),
previous_semi_space_copied_object_size_(0),
semi_space_copied_rate_(0),
nodes_died_in_new_space_(0),
nodes_copied_in_new_space_(0),
@@ -433,6 +436,7 @@ void Heap::GarbageCollectionPrologue() {
// Reset GC statistics.
promoted_objects_size_ = 0;
previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
semi_space_copied_object_size_ = 0;
nodes_died_in_new_space_ = 0;
nodes_copied_in_new_space_ = 0;
@@ -1036,14 +1040,23 @@ void Heap::ClearNormalizedMapCaches() {
void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
if (start_new_space_size == 0) return;
-promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
-static_cast<double>(start_new_space_size) * 100);
+promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
+static_cast<double>(start_new_space_size) * 100);
if (previous_semi_space_copied_object_size_ > 0) {
promotion_rate_ =
(static_cast<double>(promoted_objects_size_) /
static_cast<double>(previous_semi_space_copied_object_size_) * 100);
} else {
promotion_rate_ = 0;
}
semi_space_copied_rate_ =
(static_cast<double>(semi_space_copied_object_size_) /
static_cast<double>(start_new_space_size) * 100);
-double survival_rate = promotion_rate_ + semi_space_copied_rate_;
+double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
tracer()->AddSurvivalRate(survival_rate);
if (survival_rate > kYoungSurvivalRateHighThreshold) {
high_survival_rate_period_length_++;
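To make the renamed statistics concrete, a small illustrative computation with made-up sizes: promotion_ratio_ and semi_space_copied_rate_ are relative to the new space size at the start of the GC, while the new promotion_rate_ is relative to what survived the previous scavenge.

#include <cstdio>

int main() {
  // Hypothetical byte counts for one scavenge.
  const double start_new_space_size = 8.0 * 1024 * 1024;
  const double promoted_objects_size = 1.0 * 1024 * 1024;
  const double semi_space_copied_object_size = 2.0 * 1024 * 1024;
  const double previous_semi_space_copied_object_size = 4.0 * 1024 * 1024;

  const double promotion_ratio =
      promoted_objects_size / start_new_space_size * 100;  // 12.5%
  const double promotion_rate =
      promoted_objects_size / previous_semi_space_copied_object_size * 100;  // 25.0%
  const double semi_space_copied_rate =
      semi_space_copied_object_size / start_new_space_size * 100;  // 25.0%
  const double survival_rate = promotion_ratio + semi_space_copied_rate;  // 37.5%

  std::printf("ratio=%.1f%% rate=%.1f%% copied=%.1f%% survival=%.1f%%\n",
              promotion_ratio, promotion_rate, semi_space_copied_rate,
              survival_rate);
  return 0;
}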
@@ -1101,11 +1114,13 @@ bool Heap::PerformGarbageCollection(
old_generation_allocation_limit_ =
OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
old_gen_exhausted_ = false;
old_generation_size_configured_ = true;
} else {
Scavenge();
}
UpdateSurvivalStatistics(start_new_space_size);
ConfigureInitialOldGenerationSize();
isolate_->counters()->objs_since_last_young()->Set(0);
@@ -2346,6 +2361,17 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
}
void Heap::ConfigureInitialOldGenerationSize() {
if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
old_generation_allocation_limit_ =
Max(kMinimumOldGenerationAllocationLimit,
static_cast<intptr_t>(
static_cast<double>(initial_old_generation_size_) *
(tracer()->AverageSurvivalRate() / 100)));
}
}
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result;
@@ -5158,6 +5184,13 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
max_old_generation_size_);
if (FLAG_initial_old_space_size > 0) {
initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
} else {
initial_old_generation_size_ = max_old_generation_size_ / 2;
}
old_generation_allocation_limit_ = initial_old_generation_size_;
// We rely on being able to allocate new arrays in paged spaces.
DCHECK(Page::kMaxRegularHeapObjectSize >=
(JSArray::kSize +

src/heap/heap.h

@@ -1506,6 +1506,8 @@ class Heap {
int initial_semispace_size_;
int target_semispace_size_;
intptr_t max_old_generation_size_;
intptr_t initial_old_generation_size_;
bool old_generation_size_configured_;
intptr_t max_executable_size_;
intptr_t maximum_committed_;
@@ -1993,8 +1995,10 @@ class Heap {
int high_survival_rate_period_length_;
intptr_t promoted_objects_size_;
double promotion_ratio_;
double promotion_rate_;
intptr_t semi_space_copied_object_size_;
intptr_t previous_semi_space_copied_object_size_;
double semi_space_copied_rate_;
int nodes_died_in_new_space_;
int nodes_copied_in_new_space_;
@@ -2010,6 +2014,8 @@ class Heap {
// Re-visit incremental marking heuristics.
bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
void ConfigureInitialOldGenerationSize();
void SelectScavengingVisitorsTable();
void IdleMarkCompact(const char* message);