[heap] Make survival counters concurrent (atomic)

These counters will be incremented concurrently during parallel evacuation of
new space. This change makes sure the updates are done atomically.

Note that the actual reasoning about these counters still happens sequentially
on the main thread.

BUG=chromium:524425
LOG=N
R=ulan@chromium.org

Review URL: https://codereview.chromium.org/1585843010

Cr-Commit-Position: refs/heads/master@{#33326}
Authored by mlippautz on 2016-01-15 04:23:04 -08:00; committed by Commit bot
parent 0aeaf0cbd1
commit 7cf64f4017
3 changed files with 45 additions and 34 deletions
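
For readers unfamiliar with the counter wrapper this patch adopts: below is a
minimal sketch of the Increment/Value/SetValue interface the diff relies on,
written against std::atomic. It is an illustration of the semantics only, not
V8's actual base::AtomicNumber implementation.

// Sketch of an AtomicNumber<T>-style wrapper (illustrative, not V8's code).
#include <atomic>

template <typename T>
class AtomicNumber {
 public:
  AtomicNumber() : value_(0) {}

  // Safe to call from several evacuation tasks concurrently.
  void Increment(T increment) {
    value_.fetch_add(increment, std::memory_order_relaxed);
  }

  // Used on the main thread, where reasoning about the counters stays
  // sequential.
  T Value() const { return value_.load(std::memory_order_relaxed); }
  void SetValue(T new_value) {
    value_.store(new_value, std::memory_order_relaxed);
  }

 private:
  std::atomic<T> value_;
};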

src/heap/heap.cc

@@ -437,9 +437,9 @@ void Heap::GarbageCollectionPrologue() {
   }
 
   // Reset GC statistics.
-  promoted_objects_size_ = 0;
-  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
-  semi_space_copied_object_size_ = 0;
+  promoted_objects_size_.SetValue(0);
+  previous_semi_space_copied_object_size_ = semi_space_copied_object_size();
+  semi_space_copied_object_size_.SetValue(0);
   nodes_died_in_new_space_ = 0;
   nodes_copied_in_new_space_ = 0;
   nodes_promoted_ = 0;
@@ -1234,19 +1234,19 @@ void Heap::ClearNormalizedMapCaches() {
 
 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;
-  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
+  promotion_ratio_ = (static_cast<double>(promoted_objects_size()) /
                       static_cast<double>(start_new_space_size) * 100);
 
   if (previous_semi_space_copied_object_size_ > 0) {
     promotion_rate_ =
-        (static_cast<double>(promoted_objects_size_) /
+        (static_cast<double>(promoted_objects_size()) /
          static_cast<double>(previous_semi_space_copied_object_size_) * 100);
   } else {
     promotion_rate_ = 0;
   }
 
   semi_space_copied_rate_ =
-      (static_cast<double>(semi_space_copied_object_size_) /
+      (static_cast<double>(semi_space_copied_object_size()) /
       static_cast<double>(start_new_space_size) * 100);
 
   double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
@@ -1309,7 +1309,7 @@ bool Heap::PerformGarbageCollection(
     // This should be updated before PostGarbageCollectionProcessing, which
     // can cause another GC. Take into account the objects promoted during GC.
     old_generation_allocation_counter_ +=
-        static_cast<size_t>(promoted_objects_size_);
+        static_cast<size_t>(promoted_objects_size());
     old_generation_size_at_last_gc_ = PromotedSpaceSizeOfObjects();
   } else {
     Scavenge();
@@ -1513,18 +1513,18 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
 
 void Heap::CheckNewSpaceExpansionCriteria() {
   if (FLAG_experimental_new_space_growth_heuristic) {
     if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
+        survived_last_scavenge() * 100 / new_space_.TotalCapacity() >= 10) {
       // Grow the size of new space if there is room to grow, and more than 10%
       // have survived the last scavenge.
       new_space_.Grow();
-      survived_since_last_expansion_ = 0;
+      survived_since_last_expansion_.SetValue(0);
     }
   } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-             survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+             survived_since_last_expansion() > new_space_.TotalCapacity()) {
     // Grow the size of new space if there is room to grow, and enough data
     // has survived scavenge since the last expansion.
     new_space_.Grow();
-    survived_since_last_expansion_ = 0;
+    survived_since_last_expansion_.SetValue(0);
   }
 }
@@ -1761,8 +1761,8 @@ void Heap::Scavenge() {
   array_buffer_tracker()->FreeDead(true);
 
   // Update how much has survived scavenge.
-  IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+  IncrementYoungSurvivorsCounter(PromotedSpaceSizeOfObjects() -
+                                 survived_watermark + new_space_.Size());
 
   LOG(isolate_, ResourceEvent("scavenge", "end"));

src/heap/heap.h

@@ -1407,22 +1407,26 @@ class Heap {
   void UpdateSurvivalStatistics(int start_new_space_size);
 
-  inline void IncrementPromotedObjectsSize(int object_size) {
+  inline void IncrementPromotedObjectsSize(intptr_t object_size) {
     DCHECK_GE(object_size, 0);
-    promoted_objects_size_ += object_size;
+    promoted_objects_size_.Increment(object_size);
   }
-  inline intptr_t promoted_objects_size() { return promoted_objects_size_; }
-
-  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
-    DCHECK_GE(object_size, 0);
-    semi_space_copied_object_size_ += object_size;
+  inline intptr_t promoted_objects_size() {
+    return promoted_objects_size_.Value();
+  }
+
+  inline void IncrementSemiSpaceCopiedObjectSize(intptr_t object_size) {
+    DCHECK_GE(object_size, 0);
+    semi_space_copied_object_size_.Increment(object_size);
   }
 
   inline intptr_t semi_space_copied_object_size() {
-    return semi_space_copied_object_size_;
+    return semi_space_copied_object_size_.Value();
   }
 
   inline intptr_t SurvivedNewSpaceObjectSize() {
-    return promoted_objects_size_ + semi_space_copied_object_size_;
+    return promoted_objects_size() + semi_space_copied_object_size();
   }
 
   inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
@@ -1431,10 +1435,18 @@ class Heap {
 
   inline void IncrementNodesPromoted() { nodes_promoted_++; }
 
-  inline void IncrementYoungSurvivorsCounter(int survived) {
-    DCHECK(survived >= 0);
-    survived_last_scavenge_ = survived;
-    survived_since_last_expansion_ += survived;
+  inline void IncrementYoungSurvivorsCounter(intptr_t survived) {
+    DCHECK_GE(survived, 0);
+    survived_last_scavenge_.SetValue(survived);
+    survived_since_last_expansion_.Increment(survived);
+  }
+
+  inline intptr_t survived_last_scavenge() {
+    return survived_last_scavenge_.Value();
+  }
+
+  inline intptr_t survived_since_last_expansion() {
+    return survived_since_last_expansion_.Value();
   }
 
   inline intptr_t PromotedTotalSize() {
@@ -2181,10 +2193,10 @@ class Heap {
 
   // For keeping track of how much data has survived
   // scavenge since last new space expansion.
-  int survived_since_last_expansion_;
+  AtomicNumber<intptr_t> survived_since_last_expansion_;
 
   // ... and since the last scavenge.
-  int survived_last_scavenge_;
+  AtomicNumber<intptr_t> survived_last_scavenge_;
 
   // This is not the depth of nested AlwaysAllocateScope's but rather a single
   // count, as scopes can be acquired from multiple tasks (read: threads).
@@ -2282,10 +2294,10 @@ class Heap {
   GCTracer* tracer_;
 
   int high_survival_rate_period_length_;
-  intptr_t promoted_objects_size_;
+  AtomicNumber<intptr_t> promoted_objects_size_;
   double promotion_ratio_;
   double promotion_rate_;
-  intptr_t semi_space_copied_object_size_;
+  AtomicNumber<intptr_t> semi_space_copied_object_size_;
   intptr_t previous_semi_space_copied_object_size_;
   double semi_space_copied_rate_;
   int nodes_died_in_new_space_;

src/heap/mark-compact.cc

@@ -3100,13 +3100,12 @@ HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
     USE(ok);
     DCHECK(ok);
   }
-  heap_->IncrementPromotedObjectsSize(
-      static_cast<int>(new_space_visitor.promoted_size()));
+  heap_->IncrementPromotedObjectsSize(new_space_visitor.promoted_size());
   heap_->IncrementSemiSpaceCopiedObjectSize(
-      static_cast<int>(new_space_visitor.semispace_copied_size()));
+      new_space_visitor.semispace_copied_size());
   heap_->IncrementYoungSurvivorsCounter(
-      static_cast<int>(new_space_visitor.promoted_size()) +
-      static_cast<int>(new_space_visitor.semispace_copied_size()));
+      new_space_visitor.promoted_size() +
+      new_space_visitor.semispace_copied_size());
 
   return local_pretenuring_feedback;
 }
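
The diff above is where the parallel writes happen. As a usage illustration
(hypothetical names, building on the AtomicNumber sketch near the top):
evacuation tasks bump the shared counter concurrently, and the main thread
reads the total only after all tasks have joined, which is why sequential
reasoning about the values remains safe.

// Hypothetical illustration of the access pattern this commit enables:
// several evacuation tasks write, one main thread reads after a join.
#include <cstdint>
#include <thread>
#include <vector>

void EvacuationCounterExample(AtomicNumber<intptr_t>* promoted) {
  std::vector<std::thread> tasks;
  for (int i = 0; i < 4; ++i) {
    tasks.emplace_back([promoted] {
      promoted->Increment(1024);  // concurrent increments are race-free
    });
  }
  for (std::thread& task : tasks) task.join();
  // All tasks joined: the main thread can now reason about the counter
  // sequentially, matching the note in the commit message.
  intptr_t survived = promoted->Value();  // == 4 * 1024
  (void)survived;
}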