Implement missing functionality for job based recompilation
BUG=v8:3608
R=bmeurer@chromium.org
LOG=n

Review URL: https://codereview.chromium.org/816363003

Cr-Commit-Position: refs/heads/master@{#25925}
parent 1ec1f5957f
commit 88feffc2ab
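With --job_based_recompilation, optimized compilation work is posted to the
embedder-provided v8::Platform as short-running background tasks instead of
running on V8's dedicated optimizing-compiler thread. What was still missing,
and what this change implements, is the machinery around that path: flushing,
stopping, blocked-job handling, and tracing. For readers new to the pattern,
here is a minimal standalone sketch of the core dispatch idea (plain C++11,
with std::thread standing in for the platform's task runner; all names are
illustrative, not V8 code): a task never carries its own job but pulls the
next one from the shared input queue under a mutex, so queue order is
preserved and flushing can be expressed in terms of draining tasks.

// Sketch only: one task is posted per queued job; each task pulls the
// next job from the shared queue rather than carrying its own.
#include <cstdio>
#include <deque>
#include <mutex>
#include <thread>
#include <vector>

struct Job { int id; };

class Dispatcher {
 public:
  // Queue a job, then post a task; the task will pull from the queue.
  void QueueJob(int id) {
    {
      std::lock_guard<std::mutex> lock(queue_mutex_);
      queue_.push_back(Job{id});
    }
    // Stands in for V8::GetCurrentPlatform()->CallOnBackgroundThread(...).
    workers_.emplace_back(&Dispatcher::RunOneTask, this);
  }

  void Join() {
    for (std::thread& t : workers_) t.join();
  }

 private:
  void RunOneTask() {
    Job job;
    {
      std::lock_guard<std::mutex> lock(queue_mutex_);
      // Invariant: a task is only posted after its job is queued, so the
      // queue is never empty here (the diff's UNREACHABLE() encodes this).
      job = queue_.front();
      queue_.pop_front();
    }
    std::printf("compiled job %d\n", job.id);
  }

  std::mutex queue_mutex_;
  std::deque<Job> queue_;
  std::vector<std::thread> workers_;
};

int main() {
  Dispatcher d;
  for (int i = 0; i < 4; ++i) d.QueueJob(i);
  d.Join();
  return 0;
}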
@@ -356,7 +356,6 @@ DEFINE_BOOL(job_based_recompilation, false,
             "post tasks to v8::Platform instead of using a thread for "
             "concurrent recompilation")
 DEFINE_IMPLICATION(job_based_recompilation, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(job_based_recompilation, block_concurrent_recompilation)
 DEFINE_BOOL(trace_concurrent_recompilation, false,
             "track concurrent recompilation")
 DEFINE_INT(concurrent_recompilation_queue_length, 8,
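The hunk above drops the negative implication, so
--block_concurrent_recompilation is no longer forced off when
--job_based_recompilation is set; blocking is implemented for the job-based
path further down (see QueueForOptimization and Unblock). Conceptually the
implication macros resolve roughly as in this sketch (simplified and hedged;
not the actual macro expansion in V8's flag definitions):

// Hedged sketch of what the flag implication macros amount to after flag
// parsing (simplified; illustrative only).
#include <cstdio>

static bool FLAG_job_based_recompilation = true;
static bool FLAG_concurrent_recompilation = false;
static bool FLAG_block_concurrent_recompilation = true;

static void EnforceImplications() {
  // DEFINE_IMPLICATION(job_based_recompilation, concurrent_recompilation):
  if (FLAG_job_based_recompilation) FLAG_concurrent_recompilation = true;
  // The deleted DEFINE_NEG_IMPLICATION behaved roughly like:
  //   if (FLAG_job_based_recompilation)
  //     FLAG_block_concurrent_recompilation = false;
  // and is gone because blocking now works in job-based mode too.
}

int main() {
  EnforceImplications();
  std::printf("concurrent=%d blocking=%d\n", FLAG_concurrent_recompilation,
              FLAG_block_concurrent_recompilation);
  return 0;
}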
@@ -15,10 +15,34 @@
 namespace v8 {
 namespace internal {
 
+namespace {
+
+void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
+                                bool restore_function_code) {
+  // The recompile job is allocated in the CompilationInfo's zone.
+  CompilationInfo* info = job->info();
+  if (restore_function_code) {
+    if (info->is_osr()) {
+      if (!job->IsWaitingForInstall()) {
+        // Remove stack check that guards OSR entry on original code.
+        Handle<Code> code = info->unoptimized_code();
+        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+        BackEdgeTable::RemoveStackCheck(code, offset);
+      }
+    } else {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+  }
+  delete info;
+}
+
+}  // namespace
+
+
 class OptimizingCompilerThread::CompileTask : public v8::Task {
  public:
-  CompileTask(Isolate* isolate, OptimizedCompileJob* job)
-      : isolate_(isolate), job_(job) {}
+  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {}
 
   virtual ~CompileTask() {}
@@ -29,30 +53,41 @@ class OptimizingCompilerThread::CompileTask : public v8::Task {
     DisallowHandleAllocation no_handles;
     DisallowHandleDereference no_deref;
 
-    // The function may have already been optimized by OSR.  Simply continue.
-    OptimizedCompileJob::Status status = job_->OptimizeGraph();
-    USE(status);  // Prevent an unused-variable error in release mode.
-    DCHECK(status != OptimizedCompileJob::FAILED);
-
-    // The function may have already been optimized by OSR.  Simply continue.
-    // Use a mutex to make sure that functions marked for install
-    // are always also queued.
-    {
-      base::LockGuard<base::Mutex> lock_guard(
-          &isolate_->optimizing_compiler_thread()->output_queue_mutex_);
-      isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_);
-    }
-    isolate_->stack_guard()->RequestInstallCode();
-    {
-      base::LockGuard<base::Mutex> lock_guard(
-          &isolate_->optimizing_compiler_thread()->input_queue_mutex_);
-      isolate_->optimizing_compiler_thread()->input_queue_length_--;
-    }
-    isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal();
+    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+
+    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
+
+    if (thread->recompilation_delay_ != 0) {
+      base::OS::Sleep(thread->recompilation_delay_);
+    }
+
+    StopFlag flag;
+    OptimizedCompileJob* job = thread->NextInput(&flag);
+
+    if (flag == CONTINUE) {
+      thread->CompileNext(job);
+    } else {
+      AllowHandleDereference allow_handle_dereference;
+      if (!job->info()->is_osr()) {
+        DisposeOptimizedCompileJob(job, true);
+      }
+    }
+
+    bool signal = false;
+    {
+      base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
+      if (--thread->task_count_ == 0) {
+        if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) ==
+            FLUSH) {
+          base::Release_Store(&thread->stop_thread_,
+                              static_cast<base::AtomicWord>(CONTINUE));
+          signal = true;
+        }
+      }
+    }
+    if (signal) thread->stop_semaphore_.Signal();
   }
 
   Isolate* isolate_;
-  OptimizedCompileJob* job_;
 
   DISALLOW_COPY_AND_ASSIGN(CompileTask);
 };
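The tail of the new CompileTask::Run is the heart of the flush protocol:
every task decrements task_count_ under task_count_mutex_, and the last task
to finish while stop_thread_ reads FLUSH resets it to CONTINUE and signals
stop_semaphore_, waking the foreground thread blocked in Flush() or Stop().
Here is a standalone sketch of that handshake (plain C++11, a condition
variable standing in for V8's semaphore; illustrative names, not V8 code):

// Sketch only: the last task to finish during a pending flush resets the
// stop flag and wakes the waiting foreground thread.
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>

enum StopFlag { CONTINUE, FLUSH };

struct FlushState {
  std::mutex mutex;              // plays the role of task_count_mutex_
  std::condition_variable done;  // plays the role of stop_semaphore_
  int task_count = 0;
  StopFlag stop = CONTINUE;
};

void RunTask(FlushState* s) {
  // ... compile one job here ...
  bool signal = false;
  {
    std::lock_guard<std::mutex> lock(s->mutex);
    if (--s->task_count == 0 && s->stop == FLUSH) {
      s->stop = CONTINUE;  // the last task completes the flush
      signal = true;
    }
  }
  if (signal) s->done.notify_one();
}

int main() {
  FlushState s;
  {
    std::lock_guard<std::mutex> lock(s.mutex);
    s.task_count = 3;
    s.stop = FLUSH;  // as if Flush() had been requested
  }
  std::vector<std::thread> tasks;
  for (int i = 0; i < 3; ++i) tasks.emplace_back(RunTask, &s);
  {
    // Foreground side of Flush(): wait until the last task signals.
    std::unique_lock<std::mutex> lock(s.mutex);
    s.done.wait(lock, [&] { return s.stop == CONTINUE && s.task_count == 0; });
  }
  for (std::thread& t : tasks) t.join();
  return 0;
}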
@@ -93,8 +128,8 @@ void OptimizingCompilerThread::Run() {
     input_queue_semaphore_.Wait();
     TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
 
-    if (FLAG_concurrent_recompilation_delay != 0) {
-      base::OS::Sleep(FLAG_concurrent_recompilation_delay);
+    if (recompilation_delay_ != 0) {
+      base::OS::Sleep(recompilation_delay_);
     }
 
     switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
@@ -121,7 +156,7 @@ void OptimizingCompilerThread::Run() {
     base::ElapsedTimer compiling_timer;
     if (tracing_enabled_) compiling_timer.Start();
 
-    CompileNext();
+    CompileNext(NextInput());
 
     if (tracing_enabled_) {
       time_spent_compiling_ += compiling_timer.Elapsed();
@@ -130,20 +165,27 @@ void OptimizingCompilerThread::Run() {
 }
 
 
-OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
   base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
-  DCHECK(!job_based_recompilation_);
-  if (input_queue_length_ == 0) return NULL;
+  if (input_queue_length_ == 0) {
+    if (flag) {
+      UNREACHABLE();
+      *flag = CONTINUE;
+    }
+    return NULL;
+  }
   OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
   DCHECK_NE(NULL, job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
+  if (flag) {
+    *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_));
+  }
   return job;
 }
 
 
-void OptimizingCompilerThread::CompileNext() {
-  OptimizedCompileJob* job = NextInput();
+void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
   DCHECK_NE(NULL, job);
 
   // The function may have already been optimized by OSR.  Simply continue.
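NextInput now optionally reports the stop state to its caller. The dequeue and
the stop-state snapshot happen inside the same locked scope, so each
background task receives a consistent (job, flag) pair, and the UNREACHABLE()
encodes the invariant that a flag-taking caller (a posted task) never finds
the queue empty, because a task is only posted after its job has been
enqueued. A compilable sketch of that contract (illustrative types, not V8
code; in V8 the stop state is an atomic read placed inside the locked scope
rather than a field guarded by the queue mutex):

// Sketch only: dequeue plus stop-state snapshot in one critical section.
#include <cassert>
#include <deque>
#include <mutex>

enum StopFlag { CONTINUE, FLUSH, STOP };
struct Job { int id; };

class InputQueue {
 public:
  void Enqueue(Job* job) {
    std::lock_guard<std::mutex> lock(mutex_);
    jobs_.push_back(job);
  }
  void RequestFlush() {
    std::lock_guard<std::mutex> lock(mutex_);
    stop_state_ = FLUSH;
  }
  Job* NextInput(StopFlag* flag = nullptr) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (jobs_.empty()) {
      assert(flag == nullptr);     // mirrors the diff's UNREACHABLE()
      if (flag) *flag = CONTINUE;
      return nullptr;
    }
    Job* job = jobs_.front();
    jobs_.pop_front();
    if (flag) *flag = stop_state_;  // snapshot under the same lock
    return job;
  }

 private:
  std::mutex mutex_;
  std::deque<Job*> jobs_;
  StopFlag stop_state_ = CONTINUE;
};

int main() {
  InputQueue q;
  Job a{1};
  q.Enqueue(&a);
  StopFlag flag;
  Job* job = q.NextInput(&flag);
  assert(job != nullptr && flag == CONTINUE);
  (void)job;
  return 0;
}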
@@ -154,36 +196,17 @@ void OptimizingCompilerThread::CompileNext() {
   // The function may have already been optimized by OSR.  Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
   if (job_based_recompilation_) output_queue_mutex_.Lock();
   output_queue_.Enqueue(job);
   if (job_based_recompilation_) output_queue_mutex_.Unlock();
   isolate_->stack_guard()->RequestInstallCode();
 }
 
 
-static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
-                                       bool restore_function_code) {
-  // The recompile job is allocated in the CompilationInfo's zone.
-  CompilationInfo* info = job->info();
-  if (restore_function_code) {
-    if (info->is_osr()) {
-      if (!job->IsWaitingForInstall()) {
-        // Remove stack check that guards OSR entry on original code.
-        Handle<Code> code = info->unoptimized_code();
-        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
-        BackEdgeTable::RemoveStackCheck(code, offset);
-      }
-    } else {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
-    }
-  }
-  delete info;
-}
-
-
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
+  DCHECK(!job_based_recompilation_);
   OptimizedCompileJob* job;
   while ((job = NextInput())) {
-    DCHECK(!job_based_recompilation_);
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
@@ -196,6 +219,7 @@ void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
 
 
 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
   OptimizedCompileJob* job;
   while (output_queue_.Dequeue(&job)) {
     // OSR jobs are dealt with separately.
@@ -218,12 +242,20 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
 
 void OptimizingCompilerThread::Flush() {
   DCHECK(!IsOptimizerThread());
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
-  if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!job_based_recompilation_) {
-    input_queue_semaphore_.Signal();
-    stop_semaphore_.Wait();
+  bool block = true;
+  if (job_based_recompilation_) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    block = task_count_ > 0 || blocked_jobs_ > 0;
+    if (block) {
+      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    }
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  } else {
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    if (FLAG_block_concurrent_recompilation) Unblock();
   }
+  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
+  if (block) stop_semaphore_.Wait();
   FlushOutputQueue(true);
   if (FLAG_concurrent_osr) FlushOsrBuffer(true);
   if (tracing_enabled_) {
@@ -234,25 +266,25 @@ void OptimizingCompilerThread::Flush() {
 
 void OptimizingCompilerThread::Stop() {
   DCHECK(!IsOptimizerThread());
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
-  if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!job_based_recompilation_) {
-    input_queue_semaphore_.Signal();
-    stop_semaphore_.Wait();
-  }
-
+  bool block = true;
   if (job_based_recompilation_) {
-    while (true) {
-      {
-        base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
-        if (!input_queue_length_) break;
-      }
-      input_queue_semaphore_.Wait();
-    }
-  } else if (FLAG_concurrent_recompilation_delay != 0) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    block = task_count_ > 0 || blocked_jobs_ > 0;
+    if (block) {
+      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    }
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  } else {
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  }
+  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
+  if (block) stop_semaphore_.Wait();
+
+  if (recompilation_delay_ != 0) {
     // At this point the optimizing compiler thread's event loop has stopped.
     // There is no need for a mutex when reading input_queue_length_.
-    while (input_queue_length_ > 0) CompileNext();
+    while (input_queue_length_ > 0) CompileNext(NextInput());
     InstallOptimizedFunctions();
   } else {
     FlushInputQueue(false);
@@ -263,6 +295,7 @@ void OptimizingCompilerThread::Stop() {
 
   if (tracing_enabled_) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
+    if (job_based_recompilation_) percentage = 100.0;
    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
  }
 
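Note how Flush() and Stop() mirror each other on the job-based path: block is
computed under task_count_mutex_, and the foreground thread only waits on
stop_semaphore_ when tasks are in flight or jobs are blocked. If nothing is
outstanding, no task would ever perform the FLUSH-to-CONTINUE handshake (see
the sketch after the CompileTask hunk above), so an unconditional wait would
hang. With a nonzero recompilation delay, Stop() additionally drains any jobs
still queued by compiling them on the caller's thread via
CompileNext(NextInput()) before installing the results.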
@@ -333,11 +366,13 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
     input_queue_[InputQueueIndex(input_queue_length_)] = job;
     input_queue_length_++;
   }
-  if (job_based_recompilation_) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompileTask(isolate_, job), v8::Platform::kShortRunningTask);
-  } else if (FLAG_block_concurrent_recompilation) {
+  if (FLAG_block_concurrent_recompilation) {
     blocked_jobs_++;
+  } else if (job_based_recompilation_) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    ++task_count_;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
   } else {
     input_queue_semaphore_.Signal();
   }
@@ -346,11 +381,17 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
 
 void OptimizingCompilerThread::Unblock() {
   DCHECK(!IsOptimizerThread());
-  if (job_based_recompilation_) {
-    return;
+  {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    task_count_ += blocked_jobs_;
   }
   while (blocked_jobs_ > 0) {
-    input_queue_semaphore_.Signal();
+    if (job_based_recompilation_) {
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+    } else {
+      input_queue_semaphore_.Signal();
+    }
     blocked_jobs_--;
   }
 }
@@ -35,11 +35,13 @@ class OptimizingCompilerThread : public base::Thread {
         input_queue_shift_(0),
         osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
         osr_buffer_cursor_(0),
+        task_count_(0),
         osr_hits_(0),
         osr_attempts_(0),
         blocked_jobs_(0),
         tracing_enabled_(FLAG_trace_concurrent_recompilation),
-        job_based_recompilation_(FLAG_job_based_recompilation) {
+        job_based_recompilation_(FLAG_job_based_recompilation),
+        recompilation_delay_(FLAG_concurrent_recompilation_delay) {
     base::NoBarrier_Store(&stop_thread_,
                           static_cast<base::AtomicWord>(CONTINUE));
     input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
@@ -93,8 +95,8 @@ class OptimizingCompilerThread : public base::Thread {
   void FlushInputQueue(bool restore_function_code);
   void FlushOutputQueue(bool restore_function_code);
   void FlushOsrBuffer(bool restore_function_code);
-  void CompileNext();
-  OptimizedCompileJob* NextInput();
+  void CompileNext(OptimizedCompileJob* job);
+  OptimizedCompileJob* NextInput(StopFlag* flag = NULL);
 
   // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
   // Tasks evicted from the cyclic buffer are discarded.
@@ -138,18 +140,27 @@ class OptimizingCompilerThread : public base::Thread {
   base::TimeDelta time_spent_compiling_;
   base::TimeDelta time_spent_total_;
 
+  int task_count_;
+  // TODO(jochen): This is currently a RecursiveMutex since both Flush/Stop and
+  // Unblock try to get it, but the former methods both can call Unblock. Once
+  // job based recompilation is on by default, and the dedicated thread can be
+  // removed, this should be refactored to not use a RecursiveMutex.
+  base::RecursiveMutex task_count_mutex_;
+
   int osr_hits_;
   int osr_attempts_;
 
   int blocked_jobs_;
 
-  // Copies of FLAG_trace_concurrent_recompilation and
+  // Copies of FLAG_trace_concurrent_recompilation,
+  // FLAG_concurrent_recompilation_delay and
   // FLAG_job_based_recompilation that will be used from the background thread.
   //
   // Since flags might get modified while the background thread is running, it
   // is not safe to access them directly.
   bool tracing_enabled_;
   bool job_based_recompilation_;
+  int recompilation_delay_;
 };
 
 } }  // namespace v8::internal
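The TODO above explains the mutex choice: Flush() and Stop() take
task_count_mutex_ and, on the FLAG_block_concurrent_recompilation path, call
Unblock(), which acquires the same mutex again on the same thread; a plain
mutex would self-deadlock. A minimal sketch of the shape of the problem
(plain C++11; illustrative, not V8 code):

// Sketch only: nested acquisition on one thread needs a recursive mutex.
#include <mutex>

std::recursive_mutex task_count_mutex;
int task_count = 0;
int blocked_jobs = 0;

void Unblock() {
  // Second acquisition on the same thread: fine for a recursive mutex,
  // deadlock for a plain std::mutex.
  std::lock_guard<std::recursive_mutex> lock(task_count_mutex);
  task_count += blocked_jobs;
  blocked_jobs = 0;
}

void Flush() {
  std::lock_guard<std::recursive_mutex> lock(task_count_mutex);
  bool block = task_count > 0 || blocked_jobs > 0;
  (void)block;
  Unblock();  // re-enters task_count_mutex while it is still held
}

int main() {
  blocked_jobs = 2;
  Flush();
  return task_count == 2 ? 0 : 1;
}

With std::mutex in place of std::recursive_mutex, Flush() above would
deadlock on the nested acquisition, which is exactly what the TODO wants to
refactor away once the dedicated thread is gone.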