Simplify sampling rate calculation

Sampling rate is now calculated as the total number of samples divided by the profiling time in ms. Before this patch, the sampling rate was recalculated once per 100ms, an approach with no obvious advantage over the simpler method.

We also plan to remove the profile-node self- and total-time calculation from the v8 CPU profiler: CpuProfile will expose only the profiling start/end times, and each ProfileNode only its tick count, letting clients do all the math themselves should they need it.

BUG=None
R=bmeurer@chromium.org, loislo@chromium.org

Review URL: https://codereview.chromium.org/21105003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@15944 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
This commit is contained in:
yurys@chromium.org 2013-07-30 07:01:16 +00:00
parent e26131f792
commit 6ba502fa4d
6 changed files with 30 additions and 142 deletions

View File

@ -68,7 +68,6 @@ void ReportBuiltinEventRecord::UpdateCodeMap(CodeMap* code_map) {
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
new(ticks_buffer_.Enqueue()) TickSampleEventRecord(last_code_event_id_);
return &evt->sample;

View File

@ -452,9 +452,8 @@ void CpuProfiler::StartProcessorIfNotStarted() {
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
StopProcessorIfLastProfile(title);
CpuProfile* result = profiles_->StopProfiling(title, actual_sampling_rate);
CpuProfile* result = profiles_->StopProfiling(title);
if (result != NULL) {
result->Print();
}
@ -464,10 +463,9 @@ CpuProfile* CpuProfiler::StopProfiling(const char* title) {
CpuProfile* CpuProfiler::StopProfiling(String* title) {
if (!is_profiling_) return NULL;
const double actual_sampling_rate = generator_->actual_sampling_rate();
const char* profile_title = profiles_->GetName(title);
StopProcessorIfLastProfile(profile_title);
return profiles_->StopProfiling(profile_title, actual_sampling_rate);
return profiles_->StopProfiling(profile_title);
}

View File

@ -372,19 +372,30 @@ void ProfileTree::ShortPrint() {
}
CpuProfile::CpuProfile(const char* title, unsigned uid, bool record_samples)
: title_(title),
uid_(uid),
record_samples_(record_samples),
start_time_ms_(OS::TimeCurrentMillis()),
end_time_ms_(0) {
}
void CpuProfile::AddPath(const Vector<CodeEntry*>& path) {
ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
if (record_samples_) samples_.Add(top_frame_node);
}
void CpuProfile::CalculateTotalTicks() {
void CpuProfile::CalculateTotalTicksAndSamplingRate() {
end_time_ms_ = OS::TimeCurrentMillis();
top_down_.CalculateTotalTicks();
}
void CpuProfile::SetActualSamplingRate(double actual_sampling_rate) {
top_down_.SetTickRatePerMs(actual_sampling_rate);
double duration = end_time_ms_ - start_time_ms_;
if (duration < 1) duration = 1;
unsigned ticks = top_down_.root()->total_ticks();
double rate = ticks / duration;
top_down_.SetTickRatePerMs(rate);
}
@ -529,8 +540,7 @@ bool CpuProfilesCollection::StartProfiling(const char* title, unsigned uid,
}
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
double actual_sampling_rate) {
CpuProfile* CpuProfilesCollection::StopProfiling(const char* title) {
const int title_len = StrLength(title);
CpuProfile* profile = NULL;
current_profiles_semaphore_->Wait();
@ -543,8 +553,7 @@ CpuProfile* CpuProfilesCollection::StopProfiling(const char* title,
current_profiles_semaphore_->Signal();
if (profile == NULL) return NULL;
profile->CalculateTotalTicks();
profile->SetActualSamplingRate(actual_sampling_rate);
profile->CalculateTotalTicksAndSamplingRate();
finished_profiles_.Add(profile);
return profile;
}
@ -601,29 +610,6 @@ CodeEntry* CpuProfilesCollection::NewCodeEntry(
}
void SampleRateCalculator::Tick() {
if (--wall_time_query_countdown_ == 0)
UpdateMeasurements(OS::TimeCurrentMillis());
}
void SampleRateCalculator::UpdateMeasurements(double current_time) {
if (measurements_count_++ != 0) {
const double measured_ticks_per_ms =
(kWallTimeQueryIntervalMs * ticks_per_ms_) /
(current_time - last_wall_time_);
// Update the average value.
ticks_per_ms_ +=
(measured_ticks_per_ms - ticks_per_ms_) / measurements_count_;
// Update the externally accessible result.
result_ = static_cast<AtomicWord>(ticks_per_ms_ * kResultScale);
}
last_wall_time_ = current_time;
wall_time_query_countdown_ =
static_cast<unsigned>(kWallTimeQueryIntervalMs * ticks_per_ms_);
}
const char* const ProfileGenerator::kAnonymousFunctionName =
"(anonymous function)";
const char* const ProfileGenerator::kProgramEntryName =

View File

@ -203,13 +203,11 @@ class ProfileTree {
class CpuProfile {
public:
CpuProfile(const char* title, unsigned uid, bool record_samples)
: title_(title), uid_(uid), record_samples_(record_samples) { }
CpuProfile(const char* title, unsigned uid, bool record_samples);
// Add pc -> ... -> main() call path to the profile.
void AddPath(const Vector<CodeEntry*>& path);
void CalculateTotalTicks();
void SetActualSamplingRate(double actual_sampling_rate);
void CalculateTotalTicksAndSamplingRate();
INLINE(const char* title() const) { return title_; }
INLINE(unsigned uid() const) { return uid_; }
@ -227,6 +225,8 @@ class CpuProfile {
const char* title_;
unsigned uid_;
bool record_samples_;
double start_time_ms_;
double end_time_ms_;
List<ProfileNode*> samples_;
ProfileTree top_down_;
@ -286,7 +286,7 @@ class CpuProfilesCollection {
~CpuProfilesCollection();
bool StartProfiling(const char* title, unsigned uid, bool record_samples);
CpuProfile* StopProfiling(const char* title, double actual_sampling_rate);
CpuProfile* StopProfiling(const char* title);
List<CpuProfile*>* profiles() { return &finished_profiles_; }
const char* GetName(Name* name) {
return function_and_resource_names_.GetName(name);
@ -329,44 +329,6 @@ class CpuProfilesCollection {
};
class SampleRateCalculator {
public:
SampleRateCalculator()
: result_(Logger::kSamplingIntervalMs * kResultScale),
ticks_per_ms_(Logger::kSamplingIntervalMs),
measurements_count_(0),
wall_time_query_countdown_(1) {
}
double ticks_per_ms() {
return result_ / static_cast<double>(kResultScale);
}
void Tick();
void UpdateMeasurements(double current_time);
// Instead of querying current wall time each tick,
// we use this constant to control query intervals.
static const unsigned kWallTimeQueryIntervalMs = 100;
private:
// As the result needs to be accessed from a different thread, we
// use type that guarantees atomic writes to memory. There should
// be <= 1000 ticks per second, thus storing a value of a 10 ** 5
// order should provide enough precision while keeping away from a
// potential overflow.
static const int kResultScale = 100000;
AtomicWord result_;
// All other fields are accessed only from the sampler thread.
double ticks_per_ms_;
unsigned measurements_count_;
unsigned wall_time_query_countdown_;
double last_wall_time_;
DISALLOW_COPY_AND_ASSIGN(SampleRateCalculator);
};
class ProfileGenerator {
public:
explicit ProfileGenerator(CpuProfilesCollection* profiles);
@ -375,11 +337,6 @@ class ProfileGenerator {
INLINE(CodeMap* code_map()) { return &code_map_; }
INLINE(void Tick()) { sample_rate_calc_.Tick(); }
INLINE(double actual_sampling_rate()) {
return sample_rate_calc_.ticks_per_ms();
}
static const char* const kAnonymousFunctionName;
static const char* const kProgramEntryName;
static const char* const kGarbageCollectorEntryName;
@ -395,7 +352,6 @@ class ProfileGenerator {
CodeEntry* program_entry_;
CodeEntry* gc_entry_;
CodeEntry* unresolved_entry_;
SampleRateCalculator sample_rate_calc_;
DISALLOW_COPY_AND_ASSIGN(ProfileGenerator);
};

View File

@ -222,7 +222,7 @@ TEST(TickEvents) {
processor.StopSynchronously();
processor.Join();
CpuProfile* profile = profiles->StopProfiling("", 1);
CpuProfile* profile = profiles->StopProfiling("");
CHECK_NE(NULL, profile);
// Check call trees.
@ -286,7 +286,7 @@ TEST(Issue1398) {
processor.StopSynchronously();
processor.Join();
CpuProfile* profile = profiles->StopProfiling("", 1);
CpuProfile* profile = profiles->StopProfiling("");
CHECK_NE(NULL, profile);
int actual_depth = 0;

View File

@ -41,7 +41,6 @@ using i::CpuProfilesCollection;
using i::ProfileNode;
using i::ProfileTree;
using i::ProfileGenerator;
using i::SampleRateCalculator;
using i::TickSample;
using i::Vector;
@ -485,7 +484,7 @@ TEST(RecordTickSample) {
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
CpuProfile* profile = profiles.StopProfiling("", 1);
CpuProfile* profile = profiles.StopProfiling("");
CHECK_NE(NULL, profile);
ProfileTreeTestHelper top_down_test_helper(profile->top_down());
CHECK_EQ(NULL, top_down_test_helper.Walk(entry2));
@ -505,56 +504,6 @@ TEST(RecordTickSample) {
}
TEST(SampleRateCalculator) {
const double kSamplingIntervalMs = i::Logger::kSamplingIntervalMs;
// Verify that ticking exactly in query intervals results in the
// initial sampling interval.
double time = 0.0;
SampleRateCalculator calc1;
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs;
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs;
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs;
calc1.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc1.ticks_per_ms());
SampleRateCalculator calc2;
time = 0.0;
CHECK_EQ(kSamplingIntervalMs, calc2.ticks_per_ms());
calc2.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc2.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.5;
calc2.UpdateMeasurements(time);
// (1.0 + 2.0) / 2
CHECK_EQ(kSamplingIntervalMs * 1.5, calc2.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 0.75;
calc2.UpdateMeasurements(time);
// (1.0 + 2.0 + 2.0) / 3
CHECK_EQ(kSamplingIntervalMs * 5.0, floor(calc2.ticks_per_ms() * 3.0 + 0.5));
SampleRateCalculator calc3;
time = 0.0;
CHECK_EQ(kSamplingIntervalMs, calc3.ticks_per_ms());
calc3.UpdateMeasurements(time);
CHECK_EQ(kSamplingIntervalMs, calc3.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 2;
calc3.UpdateMeasurements(time);
// (1.0 + 0.5) / 2
CHECK_EQ(kSamplingIntervalMs * 0.75, calc3.ticks_per_ms());
time += SampleRateCalculator::kWallTimeQueryIntervalMs * 1.5;
calc3.UpdateMeasurements(time);
// (1.0 + 0.5 + 0.5) / 3
CHECK_EQ(kSamplingIntervalMs * 2.0, floor(calc3.ticks_per_ms() * 3.0 + 0.5));
}
static void CheckNodeIds(ProfileNode* node, int* expectedId) {
CHECK_EQ((*expectedId)++, node->id());
for (int i = 0; i < node->children()->length(); i++) {
@ -598,7 +547,7 @@ TEST(SampleIds) {
sample3.frames_count = 2;
generator.RecordTickSample(sample3);
CpuProfile* profile = profiles.StopProfiling("", 1);
CpuProfile* profile = profiles.StopProfiling("");
int nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(7, nodeId - 1);
@ -627,7 +576,7 @@ TEST(NoSamples) {
sample1.frames_count = 1;
generator.RecordTickSample(sample1);
CpuProfile* profile = profiles.StopProfiling("", 1);
CpuProfile* profile = profiles.StopProfiling("");
int nodeId = 1;
CheckNodeIds(profile->top_down()->root(), &nodeId);
CHECK_EQ(3, nodeId - 1);