Add V8.GC.ParallelTaskLatencyMicroSeconds metric.

It records the time-to-schedule-after-job-start for different task
types, to highlight use cases where scheduling contention might be a
problem (and to show improvements to it later).

Also introduce AsyncTimedHistogram to support this use case, in which
the reported timing extends beyond a single scope (i.e. the async
version of TimedHistogramScope).

Bug: chromium:807606
Change-Id: Ib4d581fa8b001723dfe8c91102280e9608b4fabb
Reviewed-on: https://chromium-review.googlesource.com/899365
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Commit-Queue: Gabriel Charette <gab@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51131}
Author: Gabriel Charette, 2018-02-06 14:51:16 +01:00 (committed by Commit Bot)
Parent: f7758dfa02
Commit: 85b8daed63
6 changed files with 120 additions and 23 deletions
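
For context, a rough usage sketch of the two timing helpers this change
distinguishes. The posting function and lambda below are hypothetical; the
histogram classes and counter accessors come from the diffs that follow:

// Synchronous: TimedHistogramScope reports when the C++ scope ends.
{
  TimedHistogramScope scope(counters->compile_script_on_background(), isolate);
  // ... timed work ...
}  // sample recorded here

// Asynchronous: AsyncTimedHistogram starts timing at construction and is
// reported later, possibly from another thread, via RecordDone() (task ran)
// or RecordAbandon() (task never ran).
AsyncTimedHistogram latency(async_counters->gc_parallel_task_latency(),
                            async_counters);
PostTask([h = std::move(latency)]() mutable { h.RecordDone(); });  // hypothetical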

src/counters.cc

@ -84,7 +84,6 @@ void TimedHistogram::Start(base::ElapsedTimer* timer, Isolate* isolate) {
void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
if (Enabled()) {
// Compute the delta between start and stop, in microseconds.
int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
? timer->Elapsed().InMicroseconds()
: timer->Elapsed().InMilliseconds();
@ -96,6 +95,21 @@ void TimedHistogram::Stop(base::ElapsedTimer* timer, Isolate* isolate) {
}
}
void TimedHistogram::RecordAbandon(base::ElapsedTimer* timer,
Isolate* isolate) {
if (Enabled()) {
DCHECK(timer->IsStarted());
timer->Stop();
int64_t sample = resolution_ == HistogramTimerResolution::MICROSECOND
? base::TimeDelta::Max().InMicroseconds()
: base::TimeDelta::Max().InMilliseconds();
AddSample(static_cast<int>(sample));
}
if (isolate != nullptr) {
Logger::CallEventLogger(isolate, name(), Logger::END, true);
}
}
Counters::Counters(Isolate* isolate)
: isolate_(isolate),
stats_table_(this),
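
Note on the abandon path above: RecordAbandon() records base::TimeDelta::Max()
rather than an elapsed time. Since that value is far above the histogram cap
(1000000 microseconds for the new metric), abandoned tasks saturate into the
top bucket; a sketch of the interpretation (numbers illustrative):

// Done:      sample = actual latency, e.g. 1200 us -> a normal bucket.
// Abandoned: sample = TimeDelta::Max().InMicroseconds() -> clamped into the
//            histogram's overflow bucket.
// The overflow bucket's share of samples therefore approximates the
// percentage of tasks that never got to run.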

src/counters.h

@ -220,6 +220,12 @@ class Histogram {
int max() const { return max_; }
int num_buckets() const { return num_buckets_; }
// Asserts that |expected_counters| is the same as the Counters this
// Histogram reports to.
void AssertReportsToCounters(Counters* expected_counters) {
DCHECK_EQ(counters_, expected_counters);
}
protected:
Histogram() {}
Histogram(const char* name, int min, int max, int num_buckets,
@ -229,7 +235,9 @@ class Histogram {
max_(max),
num_buckets_(num_buckets),
histogram_(nullptr),
counters_(counters) {}
counters_(counters) {
DCHECK(counters_);
}
Counters* counters() const { return counters_; }
@ -253,7 +261,7 @@ enum class HistogramTimerResolution { MILLISECOND, MICROSECOND };
// A thread safe histogram timer. It also allows distributions of
// nested timed results.
class TimedHistogram : public Histogram {
class V8_EXPORT_PRIVATE TimedHistogram : public Histogram {
public:
// Start the timer. Log if isolate non-null.
void Start(base::ElapsedTimer* timer, Isolate* isolate);
@ -261,6 +269,10 @@ class TimedHistogram : public Histogram {
// Stop the timer and record the results. Log if isolate non-null.
void Stop(base::ElapsedTimer* timer, Isolate* isolate);
// Records a TimeDelta::Max() result. Useful to record the percentage of
// tasks that never got to run in a given scenario. Log if isolate non-null.
void RecordAbandon(base::ElapsedTimer* timer, Isolate* isolate);
protected:
friend class Counters;
HistogramTimerResolution resolution_;
@ -282,6 +294,7 @@ class TimedHistogramScope {
: histogram_(histogram), isolate_(isolate) {
histogram_->Start(&timer_, isolate);
}
~TimedHistogramScope() { histogram_->Stop(&timer_, isolate_); }
private:
@ -292,6 +305,42 @@ class TimedHistogramScope {
DISALLOW_IMPLICIT_CONSTRUCTORS(TimedHistogramScope);
};
// Helper class for recording a TimedHistogram asynchronously with manual
// controls (it will not generate a report if destroyed without explicitly
// triggering a report). |async_counters| should be a shared_ptr to
// |histogram->counters()|, making it safe to report to an
// AsyncTimedHistogram after the associated isolate has been destroyed.
// AsyncTimedHistogram can be moved/copied so that multiple tasks that start
// at the same time can share a single Now() computation; each copy will
// generate its own report.
class V8_EXPORT_PRIVATE AsyncTimedHistogram {
public:
explicit AsyncTimedHistogram(TimedHistogram* histogram,
std::shared_ptr<Counters> async_counters)
: histogram_(histogram), async_counters_(std::move(async_counters)) {
histogram_->AssertReportsToCounters(async_counters_.get());
histogram_->Start(&timer_, nullptr);
}
~AsyncTimedHistogram() = default;
AsyncTimedHistogram(const AsyncTimedHistogram& other) = default;
AsyncTimedHistogram& operator=(const AsyncTimedHistogram& other) = default;
AsyncTimedHistogram(AsyncTimedHistogram&& other) = default;
AsyncTimedHistogram& operator=(AsyncTimedHistogram&& other) = default;
// Records the time elapsed to |histogram_| and stops |timer_|.
void RecordDone() { histogram_->Stop(&timer_, nullptr); }
// Records TimeDelta::Max() to |histogram_| and stops |timer_|.
void RecordAbandon() { histogram_->RecordAbandon(&timer_, nullptr); }
private:
base::ElapsedTimer timer_;
TimedHistogram* histogram_;
std::shared_ptr<Counters> async_counters_;
};
// Helper class for scoping a TimedHistogram, where the histogram is selected at
// stop time rather than start time.
// TODO(leszeks): This is heavily reliant on TimedHistogram::Start() doing
@ -1168,7 +1217,9 @@ class RuntimeCallTimerScope {
HT(compile_script_no_cache_because_cache_too_cold, \
V8.CompileScriptMicroSeconds.NoCache.CacheTooCold, 1000000, MICROSECOND) \
HT(compile_script_on_background, \
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND)
V8.CompileScriptMicroSeconds.BackgroundThread, 1000000, MICROSECOND) \
HT(gc_parallel_task_latency, V8.GC.ParallelTaskLatencyMicroSeconds, 1000000, \
MICROSECOND)
#define AGGREGATABLE_HISTOGRAM_TIMER_LIST(AHT) \
AHT(compile_lazy, V8.CompileLazyMicroSeconds)
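
For readers unfamiliar with these counter lists: each HT(name, caption, max,
res) entry is expanded inside the Counters class into a histogram member plus
an accessor. Roughly, as a simplified sketch (not V8's exact macro
definitions):

#define HT(name, caption, max, res)            \
  TimedHistogram* name() { return &name##_; }  \
  TimedHistogram name##_;
// Expanded in class Counters, the new list entry yields the
// counters->gc_parallel_task_latency() accessor used by ItemParallelJob
// below: a histogram capped at 1000000 with MICROSECOND resolution.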

src/heap/heap.cc

@ -2100,7 +2100,7 @@ void Heap::Scavenge() {
{
// Parallel phase scavenging all copied and promoted objects.
TRACE_GC(tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
job.Run();
job.Run(isolate()->async_counters());
DCHECK(copied_list.IsGlobalEmpty());
DCHECK(promotion_list.IsGlobalEmpty());
}
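
Note the new argument: instead of reaching through the Isolate at report time,
the job takes a std::shared_ptr<Counters>. Per the AsyncTimedHistogram class
comment in counters.h above, each histogram copy holds a reference on the
Counters, so reporting from a background thread stays valid even past isolate
teardown. A minimal sketch of the lifetime idea:

std::shared_ptr<Counters> async_counters = isolate()->async_counters();
AsyncTimedHistogram h(async_counters->gc_parallel_task_latency(),
                      async_counters);  // the copy keeps Counters alive
// Even if the Isolate goes away before the task runs, h's shared_ptr still
// owns the Counters object, so h.RecordDone() remains safe to call.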

src/heap/item-parallel-job.h

@ -8,8 +8,10 @@
#include <vector>
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/semaphore.h"
#include "src/cancelable-task.h"
#include "src/counters.h"
#include "src/utils.h"
#include "src/v8.h"
@ -27,6 +29,9 @@ class Isolate;
//
// Items need to be marked as finished after processing them. Task and Item
// ownership is transferred to the job.
//
// Each parallel (non-main thread) task will report the time between the job
// being created and it being scheduled to |gc_parallel_task_latency_histogram|.
class ItemParallelJob {
public:
class Task;
@ -63,7 +68,12 @@ class ItemParallelJob {
cur_index_(0),
items_considered_(0),
on_finish_(nullptr) {}
virtual ~Task() {}
virtual ~Task() {
// The histogram is reset in RunInternal(). If it's still around it means
// this task was cancelled before being scheduled.
if (gc_parallel_task_latency_histogram_)
gc_parallel_task_latency_histogram_->RecordAbandon();
}
virtual void RunInParallel() = 0;
@ -90,19 +100,33 @@ class ItemParallelJob {
// Sets up state required before invoking Run(). If
// |start_index| is >= items_.size(), this task will not process work items
// (some jobs have more tasks than work items in order to parallelize post-
// processing, e.g. scavenging).
// processing, e.g. scavenging). If |gc_parallel_task_latency_histogram| is
// provided, it will be used to report the latency between posting the task
// and it being scheduled.
void SetupInternal(base::Semaphore* on_finish, std::vector<Item*>* items,
size_t start_index) {
size_t start_index,
base::Optional<AsyncTimedHistogram>
gc_parallel_task_latency_histogram) {
on_finish_ = on_finish;
items_ = items;
if (start_index < items->size())
if (start_index < items->size()) {
cur_index_ = start_index;
else
} else {
items_considered_ = items_->size();
}
gc_parallel_task_latency_histogram_ =
std::move(gc_parallel_task_latency_histogram);
}
// We don't allow overriding this method any further.
void RunInternal() final {
if (gc_parallel_task_latency_histogram_) {
gc_parallel_task_latency_histogram_->RecordDone();
gc_parallel_task_latency_histogram_.reset();
}
RunInParallel();
on_finish_->Signal();
}
@ -111,6 +135,7 @@ class ItemParallelJob {
size_t cur_index_;
size_t items_considered_;
base::Semaphore* on_finish_;
base::Optional<AsyncTimedHistogram> gc_parallel_task_latency_histogram_;
friend class ItemParallelJob;
friend class Item;
@ -140,11 +165,16 @@ class ItemParallelJob {
int NumberOfItems() const { return static_cast<int>(items_.size()); }
int NumberOfTasks() const { return static_cast<int>(tasks_.size()); }
void Run() {
// Runs this job, reporting metrics in a thread-safe manner to
// |async_counters|.
void Run(std::shared_ptr<Counters> async_counters) {
DCHECK_GT(tasks_.size(), 0);
const size_t num_items = items_.size();
const size_t num_tasks = tasks_.size();
AsyncTimedHistogram gc_parallel_task_latency_histogram(
async_counters->gc_parallel_task_latency(), async_counters);
// Some jobs have more tasks than items (when the items are merely coarse-
// grained tasks that generate work dynamically for a second phase in which
// all tasks participate). Some jobs even have 0 items to preprocess but
@ -174,7 +204,9 @@ class ItemParallelJob {
// assigning work items.
DCHECK_IMPLIES(start_index >= num_items, i >= num_tasks_processing_items);
task->SetupInternal(pending_tasks_, &items_, start_index);
task->SetupInternal(pending_tasks_, &items_, start_index,
i > 0 ? gc_parallel_task_latency_histogram
: base::Optional<AsyncTimedHistogram>());
task_ids[i] = task->id();
if (i > 0) {
V8::GetCurrentPlatform()->CallOnBackgroundThread(
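
Putting Run(), SetupInternal(), and RunInternal() together, the measurement
pattern is: compute Now() once per job, hand a copy of the recorder to every
worker task, report on first run, and report a sentinel from the destructor if
the task never ran. A self-contained sketch of that pattern, with std::chrono
and std::thread standing in for V8's timer, histogram, and platform (all names
illustrative, not V8 APIs):

#include <chrono>
#include <cstdio>
#include <optional>
#include <thread>

using Clock = std::chrono::steady_clock;

// Stand-in for AsyncTimedHistogram: one start time, copied into each task.
struct LatencyRecorder {
  Clock::time_point start = Clock::now();
  void RecordDone() const {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  Clock::now() - start).count();
    std::printf("scheduled after %lld us\n", static_cast<long long>(us));
  }
  void RecordAbandon() const { std::printf("never scheduled\n"); }
};

struct Task {
  std::optional<LatencyRecorder> recorder;
  ~Task() {
    // Mirrors ItemParallelJob::Task::~Task(): a recorder still present here
    // means RunInternal() never ran, i.e. the task was abandoned.
    if (recorder) recorder->RecordAbandon();
  }
  void RunInternal() {
    if (recorder) {
      recorder->RecordDone();  // latency from job start to first run
      recorder.reset();        // report exactly once
    }
    // ... the actual parallel work would go here ...
  }
};

int main() {
  LatencyRecorder recorder;  // Now() computed once, as in Run()
  Task ran, abandoned;
  ran.recorder = recorder;        // copies, as for tasks with i > 0
  abandoned.recorder = recorder;
  std::thread worker([&] { ran.RunInternal(); });
  worker.join();
  return 0;  // |abandoned| is destroyed without running -> "never scheduled"
}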

src/heap/mark-compact.cc

@ -2072,7 +2072,7 @@ void MinorMarkCompactCollector::MarkRootSetInParallel() {
job.AddTask(
new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
}
job.Run();
job.Run(isolate()->async_counters());
DCHECK(worklist()->IsGlobalEmpty());
}
}
@ -3253,7 +3253,7 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
evacuators[i]->AddObserver(migration_observer);
job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
}
job->Run();
job->Run(isolate()->async_counters());
for (int i = 0; i < wanted_num_tasks; i++) {
evacuators[i]->Finalize();
delete evacuators[i];
@ -3936,7 +3936,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
updating_job.Run(isolate()->async_counters());
}
{
@ -3968,7 +3968,7 @@ void MarkCompactCollector::UpdatePointersAfterEvacuation() {
isolate(),
GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
}
updating_job.Run();
updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}
}
@ -4030,7 +4030,7 @@ void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
{
TRACE_GC(heap()->tracer(),
GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
updating_job.Run();
updating_job.Run(isolate()->async_counters());
heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
}

test/unittests/heap/item-parallel-job-unittest.cc

@ -202,7 +202,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithNoItemsRuns) {
parallel_job_semaphore());
job.AddTask(new SimpleTask(i_isolate(), &did_run));
job.Run();
job.Run(i_isolate()->async_counters());
EXPECT_TRUE(did_run);
}
@ -214,7 +214,7 @@ TEST_F(ItemParallelJobTest, SimpleTaskWithSimpleItemRuns) {
job.AddItem(new ItemParallelJob::Item);
job.Run();
job.Run(i_isolate()->async_counters());
EXPECT_TRUE(did_run);
}
@ -244,7 +244,7 @@ TEST_F(ItemParallelJobTest, MoreTasksThanItems) {
job.AddItem(new SimpleItem);
}
job.Run();
job.Run(i_isolate()->async_counters());
for (int i = 0; i < kNumTasks; i++) {
// Only the first kNumItems tasks should have been assigned a work item.
@ -261,7 +261,7 @@ TEST_F(ItemParallelJobTest, SingleThreadProcessing) {
for (int i = 0; i < kItems; i++) {
job.AddItem(new SimpleItem(&was_processed[i]));
}
job.Run();
job.Run(i_isolate()->async_counters());
for (int i = 0; i < kItems; i++) {
EXPECT_TRUE(was_processed[i]);
}
@ -282,7 +282,7 @@ TEST_F(ItemParallelJobTest, DistributeItemsMultipleTasks) {
job.AddTask(
new TaskProcessingOneItem(i_isolate(), &barrier, wait_when_done));
}
job.Run();
job.Run(i_isolate()->async_counters());
for (int i = 0; i < kItemsAndTasks; i++) {
EXPECT_TRUE(was_processed[i]);
}
@ -296,7 +296,7 @@ TEST_F(ItemParallelJobTest, DifferentItems) {
job.AddItem(new ItemA());
job.AddItem(new ItemB());
job.AddTask(new TaskForDifferentItems(i_isolate(), &item_a, &item_b));
job.Run();
job.Run(i_isolate()->async_counters());
EXPECT_TRUE(item_a);
EXPECT_TRUE(item_b);
}