Refactor the ring buffer in GCTracer.
Now instead of saving all event details in the ring buffer, we save only the
bytes and duration. This reduces the GCTracer size from 20K to 3K and
simplifies code.

BUG=chromium:597310
LOG=NO

Review URL: https://codereview.chromium.org/1830723004
Cr-Commit-Position: refs/heads/master@{#35104}
parent 945a2b7a86
commit c42b2c4493
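To make the size claim concrete, here is a rough standalone sketch of what each ring-buffer slot stores before and after this change. The OldStyleEvent struct below is a hypothetical stand-in, not the real GCTracer::Event; only the BytesAndDuration pair and the 10-slot buffer mirror the patch.

// Sketch only: the old tracer kept several ring buffers of full event records,
// the new one keeps small buffers of (bytes, duration) pairs or plain doubles.
#include <cstdint>
#include <cstdio>
#include <utility>

typedef std::pair<uint64_t, double> BytesAndDuration;  // as added in gc-tracer.h

struct OldStyleEvent {   // hypothetical stand-in for the old GCTracer::Event
  double start_time, end_time;
  intptr_t start_object_size, end_object_size;
  double scopes[32];     // per-scope timings made each event large
};

int main() {
  const int kSlots = 10;  // kSize in the new RingBuffer
  std::printf("per-buffer, old style: %zu bytes\n",
              sizeof(OldStyleEvent) * kSlots);
  std::printf("per-buffer, new style: %zu bytes\n",
              sizeof(BytesAndDuration) * kSlots);
  return 0;
}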
src/heap/gc-tracer.cc

@@ -45,23 +45,6 @@ GCTracer::Scope::~Scope() {
 }


-GCTracer::AllocationEvent::AllocationEvent(double duration,
-                                           size_t allocation_in_bytes) {
-  duration_ = duration;
-  allocation_in_bytes_ = allocation_in_bytes;
-}
-
-
-GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
-  time_ = time;
-}
-
-
-GCTracer::SurvivalEvent::SurvivalEvent(double promotion_ratio) {
-  promotion_ratio_ = promotion_ratio;
-}
-
-
 GCTracer::Event::Event(Type type, const char* gc_reason,
                        const char* collector_reason)
     : type(type),
@@ -202,7 +185,6 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
   }
 }

-
 void GCTracer::Stop(GarbageCollector collector) {
   start_counter_--;
   if (start_counter_ != 0) {
@@ -233,6 +215,7 @@ void GCTracer::Stop(GarbageCollector collector) {
   heap_->isolate()->counters()->aggregated_memory_heap_used()->AddSample(
       current_.end_time, used_memory);

+  double duration = current_.end_time - current_.start_time;
   if (current_.type == Event::SCAVENGER) {
     current_.incremental_marking_steps =
         current_.cumulative_incremental_marking_steps -
@@ -246,7 +229,10 @@ void GCTracer::Stop(GarbageCollector collector) {
     current_.pure_incremental_marking_duration =
         current_.cumulative_pure_incremental_marking_duration -
         previous_.cumulative_pure_incremental_marking_duration;
-    scavenger_events_.push_front(current_);
+    recorded_scavenges_total_.Push(
+        MakeBytesAndDuration(current_.new_space_object_size, duration));
+    recorded_scavenges_survived_.Push(MakeBytesAndDuration(
+        current_.survived_new_space_object_size, duration));
   } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
     current_.incremental_marking_steps =
         current_.cumulative_incremental_marking_steps -
@@ -265,20 +251,24 @@ void GCTracer::Stop(GarbageCollector collector) {
         previous_incremental_mark_compactor_event_
             .cumulative_pure_incremental_marking_duration;
     longest_incremental_marking_step_ = 0.0;
-    incremental_mark_compactor_events_.push_front(current_);
+    recorded_incremental_marking_steps_.Push(
+        MakeBytesAndDuration(current_.incremental_marking_bytes,
+                             current_.pure_incremental_marking_duration));
+    recorded_incremental_mark_compacts_.Push(
+        MakeBytesAndDuration(current_.start_object_size, duration));
     combined_mark_compact_speed_cache_ = 0.0;
   } else {
     DCHECK(current_.incremental_marking_bytes == 0);
     DCHECK(current_.incremental_marking_duration == 0);
     DCHECK(current_.pure_incremental_marking_duration == 0);
     longest_incremental_marking_step_ = 0.0;
-    mark_compactor_events_.push_front(current_);
+    recorded_mark_compacts_.Push(
+        MakeBytesAndDuration(current_.start_object_size, duration));
     combined_mark_compact_speed_cache_ = 0.0;
   }

   // TODO(ernstm): move the code below out of GCTracer.

-  double duration = current_.end_time - current_.start_time;
   double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);

   heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
@@ -336,12 +326,12 @@ void GCTracer::SampleAllocation(double current_ms,
 void GCTracer::AddAllocation(double current_ms) {
   allocation_time_ms_ = current_ms;
   if (allocation_duration_since_gc_ > 0) {
-    new_space_allocation_events_.push_front(
-        AllocationEvent(allocation_duration_since_gc_,
-                        new_space_allocation_in_bytes_since_gc_));
-    old_generation_allocation_events_.push_front(
-        AllocationEvent(allocation_duration_since_gc_,
-                        old_generation_allocation_in_bytes_since_gc_));
+    recorded_new_generation_allocations_.Push(
+        MakeBytesAndDuration(new_space_allocation_in_bytes_since_gc_,
+                             allocation_duration_since_gc_));
+    recorded_old_generation_allocations_.Push(
+        MakeBytesAndDuration(old_generation_allocation_in_bytes_since_gc_,
+                             allocation_duration_since_gc_));
   }
   allocation_duration_since_gc_ = 0;
   new_space_allocation_in_bytes_since_gc_ = 0;
@@ -350,19 +340,19 @@ void GCTracer::AddAllocation(double current_ms) {


 void GCTracer::AddContextDisposalTime(double time) {
-  context_disposal_events_.push_front(ContextDisposalEvent(time));
+  recorded_context_disposal_times_.Push(time);
 }


 void GCTracer::AddCompactionEvent(double duration,
                                   intptr_t live_bytes_compacted) {
-  compaction_events_.push_front(
-      CompactionEvent(duration, live_bytes_compacted));
+  recorded_compactions_.Push(
+      MakeBytesAndDuration(live_bytes_compacted, duration));
 }


 void GCTracer::AddSurvivalRatio(double promotion_ratio) {
-  survival_events_.push_front(SurvivalEvent(promotion_ratio));
+  recorded_survival_ratios_.Push(promotion_ratio);
 }


@@ -669,128 +659,62 @@ void GCTracer::PrintNVP() const {
   }
 }


-double GCTracer::MeanDuration(const EventBuffer& events) const {
-  if (events.empty()) return 0.0;
-
-  double mean = 0.0;
-  EventBuffer::const_iterator iter = events.begin();
-  while (iter != events.end()) {
-    mean += iter->end_time - iter->start_time;
-    ++iter;
-  }
-
-  return mean / events.size();
+int GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+                           const BytesAndDuration& initial, double time_ms) {
+  BytesAndDuration sum = buffer.Sum(
+      [time_ms](BytesAndDuration a, BytesAndDuration b) {
+        if (time_ms != 0 && a.second >= time_ms) return a;
+        return std::make_pair(a.first + b.first, a.second + b.second);
+      },
+      initial);
+  uint64_t bytes = sum.first;
+  double durations = sum.second;
+  if (durations == 0.0) return 0;
+  double speed = bytes / durations + 0.5;
+  const int max_speed = 1024 * MB;
+  const int min_speed = 1;
+  if (speed >= max_speed) return max_speed;
+  if (speed <= min_speed) return min_speed;
+  return static_cast<int>(speed);
 }


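For orientation, a sketch of how the new helper is consumed, modeled on the unit test added at the bottom of this change; the free function wrapping it here is only for illustration:

// Sketch: computing an average speed from recorded (bytes, duration) samples.
#include "src/heap/gc-tracer.h"

int AverageSpeedSketch() {
  using v8::internal::BytesAndDuration;
  using v8::internal::GCTracer;
  using v8::internal::MakeBytesAndDuration;
  using v8::internal::RingBuffer;

  RingBuffer<BytesAndDuration> buffer;
  buffer.Push(MakeBytesAndDuration(100, 8));  // 100 bytes over 8 ms
  buffer.Push(MakeBytesAndDuration(300, 2));  // 300 bytes over 2 ms

  // time_ms == 0: fold every sample -> (100 + 300) / (8 + 2) = 40 bytes/ms.
  int overall = GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);

  // time_ms == 2: stop folding (newest sample first) once ~2 ms are covered
  // -> 300 / 2 = 150 bytes/ms.
  int recent = GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 2);

  return overall + recent;  // 190; returned only so both values are used
}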
-double GCTracer::MaxDuration(const EventBuffer& events) const {
-  if (events.empty()) return 0.0;
-
-  double maximum = 0.0f;
-  EventBuffer::const_iterator iter = events.begin();
-  while (iter != events.end()) {
-    maximum = Max(iter->end_time - iter->start_time, maximum);
-    ++iter;
-  }
-
-  return maximum;
+int GCTracer::AverageSpeed(const RingBuffer<BytesAndDuration>& buffer) {
+  return AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 0);
 }


 intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
   if (cumulative_incremental_marking_duration_ == 0.0) return 0;

   // We haven't completed an entire round of incremental marking, yet.
   // Use data from GCTracer instead of data from event buffers.
-  if (incremental_mark_compactor_events_.empty()) {
+  if (recorded_incremental_marking_steps_.Count() == 0) {
     return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
                                  cumulative_pure_incremental_marking_duration_);
   }
-
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
-  while (iter != incremental_mark_compactor_events_.end()) {
-    bytes += iter->incremental_marking_bytes;
-    durations += iter->pure_incremental_marking_duration;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_incremental_marking_steps_);
 }


 intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond(
     ScavengeSpeedMode mode) const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = scavenger_events_.begin();
-  while (iter != scavenger_events_.end()) {
-    bytes += mode == kForAllObjects ? iter->new_space_object_size
-                                    : iter->survived_new_space_object_size;
-    durations += iter->end_time - iter->start_time;
-    ++iter;
+  if (mode == kForAllObjects) {
+    return AverageSpeed(recorded_scavenges_total_);
+  } else {
+    return AverageSpeed(recorded_scavenges_survived_);
   }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
 }


 intptr_t GCTracer::CompactionSpeedInBytesPerMillisecond() const {
-  if (compaction_events_.size() == 0) return 0;
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  CompactionEventBuffer::const_iterator iter = compaction_events_.begin();
-  while (iter != compaction_events_.end()) {
-    bytes += iter->live_bytes_compacted;
-    durations += iter->duration;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<intptr_t>(static_cast<intptr_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_compactions_);
 }


 intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
-  while (iter != mark_compactor_events_.end()) {
-    bytes += iter->start_object_size;
-    durations += iter->end_time - iter->start_time;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_mark_compacts_);
 }


 intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
     const {
-  intptr_t bytes = 0;
-  double durations = 0.0;
-  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
-  while (iter != incremental_mark_compactor_events_.end()) {
-    bytes += iter->start_object_size;
-    durations += iter->end_time - iter->start_time;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<size_t>(static_cast<size_t>(bytes / durations + 0.5), 1);
+  return AverageSpeed(recorded_incremental_mark_compacts_);
 }


 double GCTracer::CombinedMarkCompactSpeedInBytesPerMillisecond() {
   if (combined_mark_compact_speed_cache_ > 0)
     return combined_mark_compact_speed_cache_;
@@ -816,39 +740,16 @@ double GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   size_t bytes = new_space_allocation_in_bytes_since_gc_;
   double durations = allocation_duration_since_gc_;
-  AllocationEventBuffer::const_iterator iter =
-      new_space_allocation_events_.begin();
-  const size_t max_bytes = static_cast<size_t>(-1);
-  while (iter != new_space_allocation_events_.end() &&
-         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
-    bytes += iter->allocation_in_bytes_;
-    durations += iter->duration_;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-
-  // Make sure the result is at least 1.
-  return Max<double>(bytes / durations, 1);
+  return AverageSpeed(recorded_new_generation_allocations_,
+                      MakeBytesAndDuration(bytes, durations), time_ms);
 }

 double GCTracer::OldGenerationAllocationThroughputInBytesPerMillisecond(
     double time_ms) const {
   size_t bytes = old_generation_allocation_in_bytes_since_gc_;
   double durations = allocation_duration_since_gc_;
-  AllocationEventBuffer::const_iterator iter =
-      old_generation_allocation_events_.begin();
-  const size_t max_bytes = static_cast<size_t>(-1);
-  while (iter != old_generation_allocation_events_.end() &&
-         bytes < max_bytes - bytes && (time_ms == 0 || durations < time_ms)) {
-    bytes += iter->allocation_in_bytes_;
-    durations += iter->duration_;
-    ++iter;
-  }
-
-  if (durations == 0.0) return 0;
-  // Make sure the result is at least 1.
-  return Max<double>(bytes / durations, 1);
+  return AverageSpeed(recorded_old_generation_allocations_,
+                      MakeBytesAndDuration(bytes, durations), time_ms);
 }

 double GCTracer::AllocationThroughputInBytesPerMillisecond(
@@ -869,42 +770,27 @@ size_t GCTracer::CurrentOldGenerationAllocationThroughputInBytesPerMillisecond()
       kThroughputTimeFrameMs);
 }


 double GCTracer::ContextDisposalRateInMilliseconds() const {
-  if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
-
+  if (recorded_context_disposal_times_.Count() <
+      recorded_context_disposal_times_.kSize)
+    return 0.0;
   double begin = heap_->MonotonicallyIncreasingTimeInMs();
-  double end = 0.0;
-  ContextDisposalEventBuffer::const_iterator iter =
-      context_disposal_events_.begin();
-  while (iter != context_disposal_events_.end()) {
-    end = iter->time_;
-    ++iter;
-  }
-
-  return (begin - end) / context_disposal_events_.size();
+  double end = recorded_context_disposal_times_.Sum(
+      [](double a, double b) { return b; }, 0.0);
+  return (begin - end) / recorded_context_disposal_times_.Count();
 }


 double GCTracer::AverageSurvivalRatio() const {
-  if (survival_events_.size() == 0) return 0.0;
-
-  double sum_of_rates = 0.0;
-  SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
-  while (iter != survival_events_.end()) {
-    sum_of_rates += iter->promotion_ratio_;
-    ++iter;
-  }
-
-  return sum_of_rates / static_cast<double>(survival_events_.size());
+  if (recorded_survival_ratios_.Count() == 0) return 0.0;
+  double sum = recorded_survival_ratios_.Sum(
+      [](double a, double b) { return a + b; }, 0.0);
+  return sum / recorded_survival_ratios_.Count();
 }


 bool GCTracer::SurvivalEventsRecorded() const {
-  return survival_events_.size() > 0;
+  return recorded_survival_ratios_.Count() > 0;
 }


-void GCTracer::ResetSurvivalEvents() { survival_events_.reset(); }
+void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); }
 }  // namespace internal
 }  // namespace v8
src/heap/gc-tracer.h

@@ -12,80 +12,49 @@
 namespace v8 {
 namespace internal {

 // A simple ring buffer class with maximum size known at compile time.
 // The class only implements the functionality required in GCTracer.
-template <typename T, size_t MAX_SIZE>
+template <typename T>
 class RingBuffer {
  public:
-  class const_iterator {
-   public:
-    const_iterator() : index_(0), elements_(NULL) {}
-
-    const_iterator(size_t index, const T* elements)
-        : index_(index), elements_(elements) {}
-
-    bool operator==(const const_iterator& rhs) const {
-      return elements_ == rhs.elements_ && index_ == rhs.index_;
-    }
-
-    bool operator!=(const const_iterator& rhs) const {
-      return elements_ != rhs.elements_ || index_ != rhs.index_;
-    }
-
-    operator const T*() const { return elements_ + index_; }
-
-    const T* operator->() const { return elements_ + index_; }
-
-    const T& operator*() const { return elements_[index_]; }
-
-    const_iterator& operator++() {
-      index_ = (index_ + 1) % (MAX_SIZE + 1);
-      return *this;
-    }
-
-    const_iterator& operator--() {
-      index_ = (index_ + MAX_SIZE) % (MAX_SIZE + 1);
-      return *this;
-    }
-
-   private:
-    size_t index_;
-    const T* elements_;
-  };
-
-  RingBuffer() : begin_(0), end_(0) {}
-
-  bool empty() const { return begin_ == end_; }
-  size_t size() const {
-    return (end_ - begin_ + MAX_SIZE + 1) % (MAX_SIZE + 1);
-  }
-  const_iterator begin() const { return const_iterator(begin_, elements_); }
-  const_iterator end() const { return const_iterator(end_, elements_); }
-  const_iterator back() const { return --end(); }
-  void push_back(const T& element) {
-    elements_[end_] = element;
-    end_ = (end_ + 1) % (MAX_SIZE + 1);
-    if (end_ == begin_) begin_ = (begin_ + 1) % (MAX_SIZE + 1);
-  }
-  void push_front(const T& element) {
-    begin_ = (begin_ + MAX_SIZE) % (MAX_SIZE + 1);
-    if (begin_ == end_) end_ = (end_ + MAX_SIZE) % (MAX_SIZE + 1);
-    elements_[begin_] = element;
-  }
-
-  void reset() {
-    begin_ = 0;
-    end_ = 0;
-  }
+  RingBuffer() { Reset(); }
+  static const int kSize = 10;
+  void Push(const T& value) {
+    if (count_ == kSize) {
+      elements_[start_++] = value;
+      if (start_ == kSize) start_ = 0;
+    } else {
+      DCHECK_EQ(start_, 0);
+      elements_[count_++] = value;
+    }
+  }
+
+  int Count() const { return count_; }
+
+  template <typename Callback>
+  T Sum(Callback callback, const T& initial) const {
+    int j = start_ + count_ - 1;
+    if (j >= kSize) j -= kSize;
+    T result = initial;
+    for (int i = 0; i < count_; i++) {
+      result = callback(result, elements_[j]);
+      if (--j == -1) j += kSize;
+    }
+    return result;
+  }
+
+  void Reset() { start_ = count_ = 0; }

  private:
-  T elements_[MAX_SIZE + 1];
-  size_t begin_;
-  size_t end_;
+  T elements_[kSize];
+  int start_;
+  int count_;
   DISALLOW_COPY_AND_ASSIGN(RingBuffer);
 };

+typedef std::pair<uint64_t, double> BytesAndDuration;
+
+inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
+  return std::make_pair(bytes, duration);
+}
+
 enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };

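A small usage sketch of the new buffer API (the ratio values are made up), mirroring how AverageSurvivalRatio() in gc-tracer.cc above now aggregates recorded doubles:

// Sketch: the new RingBuffer keeps at most kSize (10) entries, overwrites the
// oldest entry on overflow, and Sum() folds entries newest-to-oldest.
#include "src/heap/gc-tracer.h"

double AverageOfRecordedRatiosSketch() {
  v8::internal::RingBuffer<double> ratios;
  ratios.Push(0.25);
  ratios.Push(0.50);
  ratios.Push(0.75);
  if (ratios.Count() == 0) return 0.0;
  double sum = ratios.Sum([](double a, double b) { return a + b; }, 0.0);
  return sum / ratios.Count();  // (0.25 + 0.50 + 0.75) / 3 == 0.5
}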
@@ -158,58 +127,6 @@ class GCTracer {
   };


-  class AllocationEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    AllocationEvent() {}
-
-    AllocationEvent(double duration, size_t allocation_in_bytes);
-
-    // Time spent in the mutator during the end of the last sample to the
-    // beginning of the next sample.
-    double duration_;
-
-    // Memory allocated in the new space during the end of the last sample
-    // to the beginning of the next sample
-    size_t allocation_in_bytes_;
-  };
-
-
-  class CompactionEvent {
-   public:
-    CompactionEvent() : duration(0), live_bytes_compacted(0) {}
-
-    CompactionEvent(double duration, intptr_t live_bytes_compacted)
-        : duration(duration), live_bytes_compacted(live_bytes_compacted) {}
-
-    double duration;
-    intptr_t live_bytes_compacted;
-  };
-
-
-  class ContextDisposalEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    ContextDisposalEvent() {}
-
-    explicit ContextDisposalEvent(double time);
-
-    // Time when context disposal event happened.
-    double time_;
-  };
-
-
-  class SurvivalEvent {
-   public:
-    // Default constructor leaves the event uninitialized.
-    SurvivalEvent() {}
-
-    explicit SurvivalEvent(double survival_ratio);
-
-    double promotion_ratio_;
-  };
-
-
   class Event {
    public:
     enum Type {
@@ -314,19 +231,6 @@ class GCTracer {
     double scopes[Scope::NUMBER_OF_SCOPES];
   };

-  static const size_t kRingBufferMaxSize = 10;
-
-  typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
-
-  typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
-
-  typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
-      ContextDisposalEventBuffer;
-
-  typedef RingBuffer<CompactionEvent, kRingBufferMaxSize> CompactionEventBuffer;
-
-  typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
-
   static const int kThroughputTimeFrameMs = 5000;

   explicit GCTracer(Heap* heap);
@@ -445,6 +349,13 @@ class GCTracer {
   // Discard all recorded survival events.
   void ResetSurvivalEvents();

+  // Returns the average speed of the events in the buffer.
+  // If the buffer is empty, the result is 0.
+  // Otherwise, the result is between 1 byte/ms and 1 GB/ms.
+  static int AverageSpeed(const RingBuffer<BytesAndDuration>& buffer);
+  static int AverageSpeed(const RingBuffer<BytesAndDuration>& buffer,
+                          const BytesAndDuration& initial, double time_ms);
+
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -458,12 +369,6 @@ class GCTracer {
   // it can be included in later crash dumps.
   void Output(const char* format, ...) const;

-  // Compute the mean duration of the events in the given ring buffer.
-  double MeanDuration(const EventBuffer& events) const;
-
-  // Compute the max duration of the events in the given ring buffer.
-  double MaxDuration(const EventBuffer& events) const;
-
   void ClearMarkCompactStatistics() {
     cumulative_incremental_marking_steps_ = 0;
     cumulative_incremental_marking_bytes_ = 0;
@@ -500,28 +405,6 @@ class GCTracer {
   // Previous INCREMENTAL_MARK_COMPACTOR event.
   Event previous_incremental_mark_compactor_event_;

-  // RingBuffers for SCAVENGER events.
-  EventBuffer scavenger_events_;
-
-  // RingBuffers for MARK_COMPACTOR events.
-  EventBuffer mark_compactor_events_;
-
-  // RingBuffers for INCREMENTAL_MARK_COMPACTOR events.
-  EventBuffer incremental_mark_compactor_events_;
-
-  // RingBuffer for allocation events.
-  AllocationEventBuffer new_space_allocation_events_;
-  AllocationEventBuffer old_generation_allocation_events_;
-
-  // RingBuffer for context disposal events.
-  ContextDisposalEventBuffer context_disposal_events_;
-
-  // RingBuffer for compaction events.
-  CompactionEventBuffer compaction_events_;
-
-  // RingBuffer for survival events.
-  SurvivalEventBuffer survival_events_;
-
   // Cumulative number of incremental marking steps since creation of tracer.
   int cumulative_incremental_marking_steps_;

@@ -581,6 +464,17 @@ class GCTracer {
   // Separate timer used for --runtime_call_stats
   RuntimeCallTimer timer_;

+  RingBuffer<BytesAndDuration> recorded_incremental_marking_steps_;
+  RingBuffer<BytesAndDuration> recorded_scavenges_total_;
+  RingBuffer<BytesAndDuration> recorded_scavenges_survived_;
+  RingBuffer<BytesAndDuration> recorded_compactions_;
+  RingBuffer<BytesAndDuration> recorded_mark_compacts_;
+  RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
+  RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
+  RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
+  RingBuffer<double> recorded_context_disposal_times_;
+  RingBuffer<double> recorded_survival_ratios_;
+
   DISALLOW_COPY_AND_ASSIGN(GCTracer);
 };
 }  // namespace internal
test/cctest/cctest.gyp

@@ -143,7 +143,6 @@
         'test-fixed-dtoa.cc',
         'test-flags.cc',
         'test-func-name-inference.cc',
-        'test-gc-tracer.cc',
         'test-global-handles.cc',
         'test-global-object.cc',
         'test-hashing.cc',
test/cctest/test-gc-tracer.cc (deleted)

@@ -1,124 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-#include <utility>
-
-#include "src/heap/gc-tracer.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-TEST(RingBufferPartialFill) {
-  const int max_size = 6;
-  typedef RingBuffer<int, max_size>::const_iterator Iter;
-  RingBuffer<int, max_size> ring_buffer;
-  CHECK(ring_buffer.empty());
-  CHECK_EQ(static_cast<int>(ring_buffer.size()), 0);
-  CHECK(ring_buffer.begin() == ring_buffer.end());
-
-  // Fill ring_buffer partially: [0, 1, 2]
-  for (int i = 0; i < max_size / 2; i++) ring_buffer.push_back(i);
-
-  CHECK(!ring_buffer.empty());
-  CHECK(static_cast<int>(ring_buffer.size()) == max_size / 2);
-  CHECK(ring_buffer.begin() != ring_buffer.end());
-
-  // Test forward iteration
-  int i = 0;
-  for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
-    CHECK(*iter == i);
-    ++i;
-  }
-  CHECK_EQ(i, 3);  // one past last element.
-
-  // Test backward iteration
-  i = 2;
-  Iter iter = ring_buffer.back();
-  while (true) {
-    CHECK(*iter == i);
-    if (iter == ring_buffer.begin()) break;
-    --iter;
-    --i;
-  }
-  CHECK_EQ(i, 0);
-}
-
-
-TEST(RingBufferWrapAround) {
-  const int max_size = 6;
-  typedef RingBuffer<int, max_size>::const_iterator Iter;
-  RingBuffer<int, max_size> ring_buffer;
-
-  // Fill ring_buffer (wrap around): [9, 10, 11, 12, 13, 14]
-  for (int i = 0; i < 2 * max_size + 3; i++) ring_buffer.push_back(i);
-
-  CHECK(!ring_buffer.empty());
-  CHECK(static_cast<int>(ring_buffer.size()) == max_size);
-  CHECK(ring_buffer.begin() != ring_buffer.end());
-
-  // Test forward iteration
-  int i = 9;
-  for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
-    CHECK(*iter == i);
-    ++i;
-  }
-  CHECK_EQ(i, 15);  // one past last element.
-
-  // Test backward iteration
-  i = 14;
-  Iter iter = ring_buffer.back();
-  while (true) {
-    CHECK(*iter == i);
-    if (iter == ring_buffer.begin()) break;
-    --iter;
-    --i;
-  }
-  CHECK_EQ(i, 9);
-}
-
-
-TEST(RingBufferPushFront) {
-  const int max_size = 6;
-  typedef RingBuffer<int, max_size>::const_iterator Iter;
-  RingBuffer<int, max_size> ring_buffer;
-
-  // Fill ring_buffer (wrap around): [14, 13, 12, 11, 10, 9]
-  for (int i = 0; i < 2 * max_size + 3; i++) ring_buffer.push_front(i);
-
-  CHECK(!ring_buffer.empty());
-  CHECK(static_cast<int>(ring_buffer.size()) == max_size);
-  CHECK(ring_buffer.begin() != ring_buffer.end());
-
-  // Test forward iteration
-  int i = 14;
-  for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
-    CHECK(*iter == i);
-    --i;
-  }
-  CHECK_EQ(i, 8);  // one past last element.
-}
test/unittests/heap/gc-tracer-unittest.cc (new file, 49 lines)

@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cmath>
+#include <limits>
+
+#include "src/heap/gc-tracer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(GCTracer, AverageSpeed) {
+  RingBuffer<BytesAndDuration> buffer;
+  EXPECT_EQ(100 / 2,
+            GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 0));
+  buffer.Push(MakeBytesAndDuration(100, 8));
+  EXPECT_EQ(100 / 2,
+            GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 2));
+  EXPECT_EQ(200 / 10,
+            GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(100, 2), 3));
+  const int max_speed = 1024 * MB;
+  buffer.Reset();
+  buffer.Push(MakeBytesAndDuration(max_speed, 0.5));
+  EXPECT_EQ(max_speed,
+            GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 1));
+  const int min_speed = 1;
+  buffer.Reset();
+  buffer.Push(MakeBytesAndDuration(1, 10000));
+  EXPECT_EQ(min_speed,
+            GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), 1));
+  buffer.Reset();
+  int sum = 0;
+  for (int i = 0; i < buffer.kSize; i++) {
+    sum += i + 1;
+    buffer.Push(MakeBytesAndDuration(i + 1, 1));
+  }
+  EXPECT_EQ(
+      static_cast<int>(sum * 1.0 / buffer.kSize + 0.5),
+      GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
+  buffer.Push(MakeBytesAndDuration(100, 1));
+  EXPECT_EQ(
+      static_cast<int>((sum * 1.0 - 1 + 100) / buffer.kSize + 0.5),
+      GCTracer::AverageSpeed(buffer, MakeBytesAndDuration(0, 0), buffer.kSize));
+}
+
+}  // namespace internal
+}  // namespace v8
test/unittests/unittests.gyp

@@ -107,6 +107,7 @@
         'libplatform/worker-thread-unittest.cc',
         'heap/bitmap-unittest.cc',
        'heap/gc-idle-time-handler-unittest.cc',
+        'heap/gc-tracer-unittest.cc',
        'heap/memory-reducer-unittest.cc',
        'heap/heap-unittest.cc',
        'heap/scavenge-job-unittest.cc',