[v8 platform] Rename BackgroundThread methods to WorkerThreads methods.

Follow-up to https://chromium-review.googlesource.com/c/v8/v8/+/941442.

"background" refers to a priority and is inappropriate to refer to
worker threads as many tasks posted to worker threads by v8 are in
fact high priority.

Also took advantage of this rename to make NumberOfWorkerThreads()
return an int instead of a size_t. While the value is never negative,
int is simpler, and the Google C++ Style Guide recommends avoiding
unsigned integers in such cases (ref. "On Unsigned Integers" @
https://google.github.io/styleguide/cppguide.html#Integer_Types).

The Chromium embedder provided an int for this call, which was
converted to size_t for the override and then, more often than not,
cast back down to int on the v8 side, adding churn and readability
overhead.
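
The pattern this enables on the v8 side is sketched below. This is an
illustration only: the helper name and the constants are hypothetical;
only the min/max clamping mirrors what ConcurrentMarking and the wasm
ModuleCompiler do in this CL.

  #include <algorithm>
  #include <cstdio>

  // With an int return value, the "split a work package across the
  // worker pool" computation needs no static_casts in either direction.
  int ChooseTaskCount(int max_tasks, int num_worker_threads) {
    // Always at least one task: a worker-thread count of 0 does not
    // prohibit posting work via CallOnWorkerThread().
    return std::max(1, std::min(max_tasks, num_worker_threads));
  }

  int main() {
    std::printf("%d\n", ChooseTaskCount(/*max_tasks=*/7,
                                        /*num_worker_threads=*/4));  // 4
  }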

R=ahaas@chromium.org

Bug: v8:7310
Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
Change-Id: Ib5280df73d2846b111d985be65a10b049995ea6a
Reviewed-on: https://chromium-review.googlesource.com/941944
Commit-Queue: Gabriel Charette <gab@chromium.org>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Cr-Commit-Position: refs/heads/master@{#51662}
Gabriel Charette 2018-03-01 13:05:15 +01:00 committed by Commit Bot
parent 16a3a4e946
commit 70222a9d03
15 changed files with 95 additions and 74 deletions


@ -288,13 +288,27 @@ class Platform {
virtual bool OnCriticalMemoryPressure(size_t length) { return false; }
/**
* Gets the number of threads that are used to execute background tasks. Is
* used to estimate the number of tasks a work package should be split into.
* A return value of 0 means that there are no background threads available.
* Note that a value of 0 won't prohibit V8 from posting tasks using
* |CallOnWorkerThread|.
* Gets the number of worker threads used by GetWorkerThreadsTaskRunner() and
* CallOnWorkerThread(). This can be used to estimate the number of tasks a
* work package should be split into. A return value of 0 means that there are
* no worker threads available. Note that a value of 0 won't prohibit V8 from
* posting tasks using |CallOnWorkerThread|.
*/
virtual size_t NumberOfAvailableBackgroundThreads() { return 0; }
virtual int NumberOfWorkerThreads() {
return static_cast<int>(NumberOfAvailableBackgroundThreads());
}
/**
* Deprecated. Use NumberOfWorkerThreads() instead.
* TODO(gab): Remove this when all embedders override
* NumberOfWorkerThreads() instead.
*/
V8_DEPRECATE_SOON(
"NumberOfAvailableBackgroundThreads() is deprecated, use "
"NumberOfAvailableBackgroundThreads() instead.",
virtual size_t NumberOfAvailableBackgroundThreads()) {
return 0;
}
/**
* Returns a TaskRunner which can be used to post a task on the foreground.
@ -311,11 +325,34 @@ class Platform {
* Returns a TaskRunner which can be used to post a task on a background.
* This function should only be called from a foreground thread.
*/
virtual std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
V8_DEPRECATE_SOON(
"GetBackgroundTaskRunner() is deprecated, use "
"GetWorkerThreadsTaskRunner() "
"instead.",
virtual std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
Isolate* isolate)) {
// TODO(gab): Remove this method when all embedders have moved to
// GetWorkerThreadsTaskRunner().
// An implementation needs to be provided here because this is called by the
// default GetWorkerThreadsTaskRunner() implementation below. In practice
// however, all code either:
// - Overrides GetWorkerThreadsTaskRunner() (thus not making this call) --
// i.e. all v8 code.
// - Overrides this method (thus not making this call) -- i.e. all
// unadapted embedders.
abort();
}
/**
* Returns a TaskRunner which can be used to post async tasks on a worker.
* This function should only be called from a foreground thread.
*/
virtual std::shared_ptr<v8::TaskRunner> GetWorkerThreadsTaskRunner(
Isolate* isolate) {
// TODO(ahaas): Make this function abstract after it got implemented on all
// TODO(gab): Make this function abstract after it got implemented on all
// platforms.
return {};
return GetBackgroundTaskRunner(isolate);
}
/**

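The forwarding in the default implementations above is what keeps
unadapted embedders working during the migration: v8 calls the new
names, and unless an embedder overrides them, the calls fall through to
the old, still-overridden methods. A standalone sketch of that idea
(plain C++ with hypothetical names, not the real v8::Platform
interface):

  #include <cstdio>

  struct PlatformLike {
    virtual ~PlatformLike() = default;
    // Stands in for the deprecated NumberOfAvailableBackgroundThreads().
    virtual int DeprecatedWorkerCount() { return 0; }
    // Stands in for NumberOfWorkerThreads(); the default forwards to the
    // deprecated method.
    virtual int WorkerCount() { return DeprecatedWorkerCount(); }
  };

  struct UnadaptedEmbedder : PlatformLike {
    // Only the old hook is overridden, as in an unmigrated embedder.
    int DeprecatedWorkerCount() override { return 8; }
  };

  int main() {
    UnadaptedEmbedder platform;
    // Callers use the new name but still get the embedder's value.
    std::printf("%d\n", platform.WorkerCount());  // prints 8
  }
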

@ -520,8 +520,7 @@ void CompilerDispatcher::ScheduleMoreBackgroundTasksIfNeeded() {
{
base::LockGuard<base::Mutex> lock(&mutex_);
if (pending_background_jobs_.empty()) return;
if (platform_->NumberOfAvailableBackgroundThreads() <=
num_background_tasks_) {
if (platform_->NumberOfWorkerThreads() <= num_background_tasks_) {
return;
}
++num_background_tasks_;


@ -184,7 +184,7 @@ class V8_EXPORT_PRIVATE CompilerDispatcher {
bool idle_task_scheduled_;
// Number of scheduled or running BackgroundTask objects.
size_t num_background_tasks_;
int num_background_tasks_;
// The set of CompilerDispatcherJobs that can be advanced on any thread.
std::unordered_set<CompilerDispatcherJob*> pending_background_jobs_;


@ -197,7 +197,7 @@ class PredictablePlatform : public Platform {
return platform_->GetForegroundTaskRunner(isolate);
}
std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
std::shared_ptr<TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override {
// Return the foreground task runner here, so that all tasks get executed
// sequentially in a predictable order.


@ -507,9 +507,7 @@ void ConcurrentMarking::ScheduleTasks() {
DCHECK_EQ(0, pending_task_count_);
if (task_count_ == 0) {
task_count_ = Max(
1, Min(kMaxTasks,
static_cast<int>(V8::GetCurrentPlatform()
->NumberOfAvailableBackgroundThreads())));
1, Min(kMaxTasks, V8::GetCurrentPlatform()->NumberOfWorkerThreads()));
}
// Task id 0 is for the main thread.
for (int i = 1; i <= task_count_; i++) {


@ -1978,9 +1978,7 @@ int Heap::NumberOfScavengeTasks() {
if (!FLAG_parallel_scavenge) return 1;
const int num_scavenge_tasks =
static_cast<int>(new_space()->TotalCapacity()) / MB;
static int num_cores =
1 + static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads());
static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
return Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
}


@ -408,16 +408,10 @@ class RootMarkingVisitorSeedOnly : public RootVisitor {
};
int NumberOfAvailableCores() {
static int num_cores =
static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) +
1;
static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
// This number of cores should be greater than zero and never change.
DCHECK_GE(num_cores, 1);
DCHECK_EQ(
num_cores,
1 + static_cast<int>(
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
return num_cores;
}
@ -3286,16 +3280,15 @@ void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
delete[] evacuators;
if (FLAG_trace_evacuation) {
PrintIsolate(
isolate(),
"%8.0f ms: evacuation-summary: parallel=%s pages=%d "
"wanted_tasks=%d tasks=%d cores=%" PRIuS " live_bytes=%" V8PRIdPTR
" compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
wanted_num_tasks, job->NumberOfTasks(),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() + 1,
live_bytes, compaction_speed);
PrintIsolate(isolate(),
"%8.0f ms: evacuation-summary: parallel=%s pages=%d "
"wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
" compaction_speed=%.f\n",
isolate()->time_millis_since_init(),
FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
wanted_num_tasks, job->NumberOfTasks(),
V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
live_bytes, compaction_speed);
}
}


@ -196,14 +196,14 @@ std::shared_ptr<TaskRunner> DefaultPlatform::GetForegroundTaskRunner(
return foreground_task_runner_map_[isolate];
}
std::shared_ptr<TaskRunner> DefaultPlatform::GetBackgroundTaskRunner(
std::shared_ptr<TaskRunner> DefaultPlatform::GetWorkerThreadsTaskRunner(
v8::Isolate*) {
EnsureBackgroundTaskRunnerInitialized();
return background_task_runner_;
}
void DefaultPlatform::CallOnWorkerThread(Task* task) {
GetBackgroundTaskRunner(nullptr)->PostTask(std::unique_ptr<Task>(task));
GetWorkerThreadsTaskRunner(nullptr)->PostTask(std::unique_ptr<Task>(task));
}
void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
@ -247,9 +247,7 @@ void DefaultPlatform::SetTracingController(
tracing_controller_ = std::move(tracing_controller);
}
size_t DefaultPlatform::NumberOfAvailableBackgroundThreads() {
return static_cast<size_t>(thread_pool_size_);
}
int DefaultPlatform::NumberOfWorkerThreads() { return thread_pool_size_; }
Platform::StackTracePrinter DefaultPlatform::GetStackTracePrinter() {
return PrintStackTrace;


@ -55,10 +55,10 @@ class V8_PLATFORM_EXPORT DefaultPlatform : public NON_EXPORTED_BASE(Platform) {
void SetTimeFunctionForTesting(TimeFunction time_function);
// v8::Platform implementation.
size_t NumberOfAvailableBackgroundThreads() override;
int NumberOfWorkerThreads() override;
std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override;
std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
std::shared_ptr<TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override;
void CallOnWorkerThread(Task* task) override;
void CallOnForegroundThread(v8::Isolate* isolate, Task* task) override;


@ -221,11 +221,11 @@ class ModuleCompiler {
base::Mutex compilation_units_mutex_;
CodeGenerationSchedule executed_units_;
base::Mutex result_mutex_;
const size_t num_background_tasks_;
const int num_background_tasks_;
// This flag should only be set while holding result_mutex_.
bool finisher_is_running_ = false;
CancelableTaskManager background_task_manager_;
size_t stopped_compilation_tasks_ = 0;
int stopped_compilation_tasks_ = 0;
base::Mutex tasks_mutex_;
Handle<Code> centry_stub_;
wasm::NativeModule* native_module_;
@ -1303,8 +1303,8 @@ ModuleCompiler::ModuleCompiler(Isolate* isolate, WasmModule* module,
: isolate->heap()->code_space()->Capacity()) /
2),
num_background_tasks_(
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())),
Min(FLAG_wasm_num_compilation_tasks,
V8::GetCurrentPlatform()->NumberOfWorkerThreads())),
stopped_compilation_tasks_(num_background_tasks_),
centry_stub_(centry_stub),
native_module_(native_module) {}
@ -1371,7 +1371,7 @@ size_t ModuleCompiler::InitializeCompilationUnits(
void ModuleCompiler::RestartCompilationTasks() {
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate_);
std::shared_ptr<v8::TaskRunner> task_runner =
V8::GetCurrentPlatform()->GetBackgroundTaskRunner(v8_isolate);
V8::GetCurrentPlatform()->GetWorkerThreadsTaskRunner(v8_isolate);
base::LockGuard<base::Mutex> guard(&tasks_mutex_);
for (; stopped_compilation_tasks_ > 0; --stopped_compilation_tasks_) {
@ -1981,7 +1981,7 @@ MaybeHandle<WasmModuleObject> ModuleCompiler::CompileToModuleObjectInternal(
bool compile_parallel =
!FLAG_trace_wasm_decoder && FLAG_wasm_num_compilation_tasks > 0 &&
funcs_to_compile > 1 &&
V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads() > 0;
V8::GetCurrentPlatform()->NumberOfWorkerThreads() > 0;
// Avoid a race condition by collecting results into a second vector.
std::vector<Handle<Code>> results(
FLAG_wasm_jit_to_native ? 0 : env->module->functions.size());
@ -3417,7 +3417,7 @@ AsyncCompileJob::AsyncCompileJob(Isolate* isolate,
v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
v8::Platform* platform = V8::GetCurrentPlatform();
foreground_task_runner_ = platform->GetForegroundTaskRunner(v8_isolate);
background_task_runner_ = platform->GetBackgroundTaskRunner(v8_isolate);
background_task_runner_ = platform->GetWorkerThreadsTaskRunner(v8_isolate);
// The handles for the context and promise must be deferred.
DeferredHandleScope deferred(isolate);
context_ = Handle<Context>(*context);
@ -3515,7 +3515,7 @@ void AsyncCompileJob::AsyncCompileSucceeded(Handle<Object> result) {
// task) and schedule the next step(s), if any.
class AsyncCompileJob::CompileStep {
public:
explicit CompileStep(size_t num_background_tasks = 0)
explicit CompileStep(int num_background_tasks = 0)
: num_background_tasks_(num_background_tasks) {}
virtual ~CompileStep() {}
@ -3536,10 +3536,10 @@ class AsyncCompileJob::CompileStep {
virtual void RunInForeground() { UNREACHABLE(); }
virtual void RunInBackground() { UNREACHABLE(); }
size_t NumberOfBackgroundTasks() { return num_background_tasks_; }
int NumberOfBackgroundTasks() { return num_background_tasks_; }
AsyncCompileJob* job_ = nullptr;
const size_t num_background_tasks_;
const int num_background_tasks_;
};
class AsyncCompileJob::CompileTask : public CancelableTask {
@ -3580,10 +3580,10 @@ void AsyncCompileJob::StartBackgroundTask() {
}
void AsyncCompileJob::RestartBackgroundTasks() {
size_t num_restarts = stopped_tasks_.Value();
int num_restarts = stopped_tasks_.Value();
stopped_tasks_.Decrement(num_restarts);
for (size_t i = 0; i < num_restarts; ++i) {
for (int i = 0; i < num_restarts; ++i) {
StartBackgroundTask();
}
}
@ -3591,8 +3591,8 @@ void AsyncCompileJob::RestartBackgroundTasks() {
template <typename Step, typename... Args>
void AsyncCompileJob::DoAsync(Args&&... args) {
NextStep<Step>(std::forward<Args>(args)...);
size_t end = step_->NumberOfBackgroundTasks();
for (size_t i = 0; i < end; ++i) {
int end = step_->NumberOfBackgroundTasks();
for (int i = 0; i < end; ++i) {
StartBackgroundTask();
}
}
@ -3741,12 +3741,10 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
}
// Start asynchronous compilation tasks.
size_t num_background_tasks =
Max(static_cast<size_t>(1),
Min(num_functions,
Min(static_cast<size_t>(FLAG_wasm_num_compilation_tasks),
V8::GetCurrentPlatform()
->NumberOfAvailableBackgroundThreads())));
int num_background_tasks =
Max(1, Min(static_cast<int>(num_functions),
Min(FLAG_wasm_num_compilation_tasks,
V8::GetCurrentPlatform()->NumberOfWorkerThreads())));
if (start_compilation_) {
// TODO(ahaas): Try to remove the {start_compilation_} check when
// streaming decoding is done in the background. If
@ -3768,7 +3766,7 @@ class AsyncCompileJob::PrepareAndStartCompile : public CompileStep {
//==========================================================================
class AsyncCompileJob::ExecuteAndFinishCompilationUnits : public CompileStep {
public:
explicit ExecuteAndFinishCompilationUnits(size_t num_compile_tasks)
explicit ExecuteAndFinishCompilationUnits(int num_compile_tasks)
: CompileStep(num_compile_tasks) {}
void RunInBackground() override {


@ -187,7 +187,7 @@ class AsyncCompileJob {
std::shared_ptr<v8::TaskRunner> foreground_task_runner_;
std::shared_ptr<v8::TaskRunner> background_task_runner_;
// The number of background tasks which stopped executing within a step.
base::AtomicNumber<size_t> stopped_tasks_{0};
base::AtomicNumber<int32_t> stopped_tasks_{0};
// For async compilation the AsyncCompileJob is the only finisher. For
// streaming compilation also the AsyncStreamingProcessor has to finish before


@ -678,9 +678,9 @@ class TestPlatform : public v8::Platform {
return old_platform_->GetForegroundTaskRunner(isolate);
}
std::shared_ptr<v8::TaskRunner> GetBackgroundTaskRunner(
std::shared_ptr<v8::TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override {
return old_platform_->GetBackgroundTaskRunner(isolate);
return old_platform_->GetWorkerThreadsTaskRunner(isolate);
}
void CallOnWorkerThread(v8::Task* task) override {


@ -35,7 +35,7 @@ class MockPlatform final : public TestPlatform {
return task_runner_;
}
std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
std::shared_ptr<TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override {
return task_runner_;
}


@ -97,7 +97,7 @@ class MockPlatform : public v8::Platform {
EXPECT_TRUE(idle_task_ == nullptr);
}
size_t NumberOfAvailableBackgroundThreads() override { return 1; }
int NumberOfWorkerThreads() override { return 1; }
std::shared_ptr<TaskRunner> GetForegroundTaskRunner(
v8::Isolate* isolate) override {
@ -105,7 +105,7 @@ class MockPlatform : public v8::Platform {
return std::make_shared<MockTaskRunner>(this, is_foreground_task_runner);
}
std::shared_ptr<TaskRunner> GetBackgroundTaskRunner(
std::shared_ptr<TaskRunner> GetWorkerThreadsTaskRunner(
v8::Isolate* isolate) override {
constexpr bool is_foreground_task_runner = false;
return std::make_shared<MockTaskRunner>(this, is_foreground_task_runner);


@ -264,7 +264,7 @@ TEST(DefaultPlatformTest, RunBackgroundTask) {
DefaultPlatform platform;
platform.SetThreadPoolSize(1);
std::shared_ptr<TaskRunner> taskrunner =
platform.GetBackgroundTaskRunner(isolate);
platform.GetWorkerThreadsTaskRunner(isolate);
base::Semaphore sem(0);
bool task_executed = false;
@ -282,7 +282,7 @@ TEST(DefaultPlatformTest, NoIdleTasksInBackground) {
DefaultPlatform platform;
platform.SetThreadPoolSize(1);
std::shared_ptr<TaskRunner> taskrunner =
platform.GetBackgroundTaskRunner(isolate);
platform.GetWorkerThreadsTaskRunner(isolate);
EXPECT_FALSE(taskrunner->IdleTasksEnabled());
}
@ -296,7 +296,7 @@ TEST(DefaultPlatformTest, PostTaskAfterPlatformTermination) {
DefaultPlatformWithMockTime platform;
platform.SetThreadPoolSize(1);
foreground_taskrunner = platform.GetForegroundTaskRunner(isolate);
background_taskrunner = platform.GetBackgroundTaskRunner(isolate);
background_taskrunner = platform.GetWorkerThreadsTaskRunner(isolate);
}
// It should still be possible to post tasks, even when the platform does not
// exist anymore.