Revert of Reland "Remove support for thread-based recompilation" (patchset #1 id:1 of https://codereview.chromium.org/1059853004/)
Reason for revert:
still times out

Original issue's description:
> Reland "Remove support for thread-based recompilation"
>
> Original issue's description:
> > Remove support for thread-based recompilation
> >
> > BUG=v8:3608
> > R=yangguo@chromium.org
> > LOG=y
> >
> > Committed: https://crrev.com/ed5db223a19dfe126af012e894582251aa3635d7
> > Cr-Commit-Position: refs/heads/master@{#27619}
>
> BUG=v8:3608
> R=yangguo@chromium.org
> LOG=y
>
> Committed: https://crrev.com/f1ceccb8b8b352a91e6366e3e3103f1db0df6afb
> Cr-Commit-Position: refs/heads/master@{#27813}

TBR=yangguo@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=v8:3608

Review URL: https://codereview.chromium.org/1082183003

Cr-Commit-Position: refs/heads/master@{#27816}
parent 2c01bd34b0
commit cf663c487f
BUILD.gn
@@ -893,8 +893,8 @@ source_set("v8_base") {
     "src/objects-printer.cc",
     "src/objects.cc",
     "src/objects.h",
-    "src/optimizing-compile-dispatcher.cc",
-    "src/optimizing-compile-dispatcher.h",
+    "src/optimizing-compiler-thread.cc",
+    "src/optimizing-compiler-thread.h",
     "src/ostreams.cc",
     "src/ostreams.h",
     "src/parser.cc",
@@ -819,7 +819,7 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {

 static bool GetOptimizedCodeLater(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
-  if (!isolate->optimizing_compile_dispatcher()->IsQueueAvailable()) {
+  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
     if (FLAG_trace_concurrent_recompilation) {
       PrintF("  ** Compilation queue full, will retry optimizing ");
       info->closure()->ShortPrint();
@@ -840,7 +840,7 @@ static bool GetOptimizedCodeLater(CompilationInfo* info) {
   OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
   OptimizedCompileJob::Status status = job->CreateGraph();
   if (status != OptimizedCompileJob::SUCCEEDED) return false;
-  isolate->optimizing_compile_dispatcher()->QueueForOptimization(job);
+  isolate->optimizing_compiler_thread()->QueueForOptimization(job);

   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Queued ");
@@ -372,10 +372,12 @@ class CompilationInfo {
   }

   void AbortDueToDependencyChange() {
+    DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
     aborted_due_to_dependency_change_ = true;
   }

   bool HasAbortedDueToDependencyChange() const {
+    DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
     return aborted_due_to_dependency_change_;
   }

@@ -11,7 +11,7 @@
 #include "src/circular-queue.h"
 #include "src/compiler.h"
 #include "src/sampler.h"
-#include "src/unbound-queue-inl.h"
+#include "src/unbound-queue.h"

 namespace v8 {
 namespace internal {
@@ -1843,7 +1843,7 @@ void Debug::PrepareForBreakPoints() {
     // functions as debugging does not work with optimized code.
     if (!has_break_points_) {
       if (isolate_->concurrent_recompilation_enabled()) {
-        isolate_->optimizing_compile_dispatcher()->Flush();
+        isolate_->optimizing_compiler_thread()->Flush();
       }

       Deoptimizer::DeoptimizeAll(isolate_);
@@ -677,7 +677,7 @@ Object* StackGuard::HandleInterrupts() {

   if (CheckAndClearInterrupt(INSTALL_CODE)) {
     DCHECK(isolate_->concurrent_recompilation_enabled());
-    isolate_->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+    isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
   }

   if (CheckAndClearInterrupt(API_INTERRUPT)) {
@@ -364,6 +364,9 @@ DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops")

 DEFINE_BOOL(concurrent_recompilation, true,
             "optimizing hot functions asynchronously on a separate thread")
+DEFINE_BOOL(job_based_recompilation, true,
+            "post tasks to v8::Platform instead of using a thread for "
+            "concurrent recompilation")
 DEFINE_BOOL(trace_concurrent_recompilation, false,
             "track concurrent recompilation")
 DEFINE_INT(concurrent_recompilation_queue_length, 8,
@@ -430,7 +430,7 @@ void Heap::GarbageCollectionPrologue() {
   store_buffer()->GCPrologue();

   if (isolate()->concurrent_osr_enabled()) {
-    isolate()->optimizing_compile_dispatcher()->AgeBufferedOsrJobs();
+    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
   }

   if (new_space_.IsAtMaximumCapacity()) {
@@ -767,7 +767,7 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   if (isolate()->concurrent_recompilation_enabled()) {
     // The optimizing compiler may be unnecessarily holding on to memory.
     DisallowHeapAllocation no_recursive_gc;
-    isolate()->optimizing_compile_dispatcher()->Flush();
+    isolate()->optimizing_compiler_thread()->Flush();
   }
   isolate()->ClearSerializerData();
   mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
@@ -885,7 +885,7 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
   }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
-    isolate()->optimizing_compile_dispatcher()->Flush();
+    isolate()->optimizing_compiler_thread()->Flush();
   }
   AgeInlineCaches();
   set_retained_maps(ArrayList::cast(empty_fixed_array()));
@@ -3475,6 +3475,7 @@ HBasicBlock* HGraph::CreateBasicBlock() {

 void HGraph::FinalizeUniqueness() {
   DisallowHeapAllocation no_gc;
+  DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
   for (int i = 0; i < blocks()->length(); ++i) {
     for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
       it.Current()->FinalizeUniqueness();
@@ -1736,7 +1736,7 @@ Isolate::Isolate(bool enable_serializer)
       heap_profiler_(NULL),
       function_entry_hook_(NULL),
       deferred_handles_head_(NULL),
-      optimizing_compile_dispatcher_(NULL),
+      optimizing_compiler_thread_(NULL),
       stress_deopt_count_(0),
       next_optimization_id_(0),
 #if TRACE_MAPS
@@ -1833,9 +1833,9 @@ void Isolate::Deinit() {
   FreeThreadResources();

   if (concurrent_recompilation_enabled()) {
-    optimizing_compile_dispatcher_->Stop();
-    delete optimizing_compile_dispatcher_;
-    optimizing_compile_dispatcher_ = NULL;
+    optimizing_compiler_thread_->Stop();
+    delete optimizing_compiler_thread_;
+    optimizing_compiler_thread_ = NULL;
   }

   if (heap_.mark_compact_collector()->sweeping_in_progress()) {
@@ -2133,8 +2133,9 @@ bool Isolate::Init(Deserializer* des) {

   if (FLAG_trace_hydrogen || FLAG_trace_hydrogen_stubs) {
     PrintF("Concurrent recompilation has been disabled for tracing.\n");
-  } else if (OptimizingCompileDispatcher::Enabled(max_available_threads_)) {
-    optimizing_compile_dispatcher_ = new OptimizingCompileDispatcher(this);
+  } else if (OptimizingCompilerThread::Enabled(max_available_threads_)) {
+    optimizing_compiler_thread_ = new OptimizingCompilerThread(this);
+    optimizing_compiler_thread_->Start();
   }

   // Initialize runtime profiler before deserialization, because collections may
@@ -19,7 +19,7 @@
 #include "src/handles.h"
 #include "src/hashmap.h"
 #include "src/heap/heap.h"
-#include "src/optimizing-compile-dispatcher.h"
+#include "src/optimizing-compiler-thread.h"
 #include "src/regexp-stack.h"
 #include "src/runtime/runtime.h"
 #include "src/runtime-profiler.h"
@@ -1028,20 +1028,20 @@ class Isolate {

   bool concurrent_recompilation_enabled() {
     // Thread is only available with flag enabled.
-    DCHECK(optimizing_compile_dispatcher_ == NULL ||
+    DCHECK(optimizing_compiler_thread_ == NULL ||
           FLAG_concurrent_recompilation);
-    return optimizing_compile_dispatcher_ != NULL;
+    return optimizing_compiler_thread_ != NULL;
   }

   bool concurrent_osr_enabled() const {
     // Thread is only available with flag enabled.
-    DCHECK(optimizing_compile_dispatcher_ == NULL ||
+    DCHECK(optimizing_compiler_thread_ == NULL ||
           FLAG_concurrent_recompilation);
-    return optimizing_compile_dispatcher_ != NULL && FLAG_concurrent_osr;
+    return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
   }

-  OptimizingCompileDispatcher* optimizing_compile_dispatcher() {
-    return optimizing_compile_dispatcher_;
+  OptimizingCompilerThread* optimizing_compiler_thread() {
+    return optimizing_compiler_thread_;
   }

   int id() const { return static_cast<int>(id_); }
@@ -1329,7 +1329,7 @@ class Isolate {
 #endif

   DeferredHandles* deferred_handles_head_;
-  OptimizingCompileDispatcher* optimizing_compile_dispatcher_;
+  OptimizingCompilerThread* optimizing_compiler_thread_;

   // Counts deopt points if deopt_every_n_times is enabled.
   unsigned int stress_deopt_count_;
@@ -1350,7 +1350,7 @@ class Isolate {

   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
-  friend class OptimizingCompileDispatcher;
+  friend class OptimizingCompilerThread;
   friend class SweeperThread;
   friend class ThreadManager;
   friend class Simulator;
@@ -9708,7 +9708,7 @@ void JSFunction::AttemptConcurrentOptimization() {
     return;
   }
   if (isolate->concurrent_osr_enabled() &&
-      isolate->optimizing_compile_dispatcher()->IsQueuedForOSR(this)) {
+      isolate->optimizing_compiler_thread()->IsQueuedForOSR(this)) {
     // Do not attempt regular recompilation if we already queued this for OSR.
     // TODO(yangguo): This is necessary so that we don't install optimized
     // code on a function that is already optimized, since OSR and regular
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#include "src/optimizing-compile-dispatcher.h"
+#include "src/optimizing-compiler-thread.h"

 #include "src/v8.h"

@@ -10,6 +10,7 @@
 #include "src/full-codegen.h"
 #include "src/hydrogen.h"
 #include "src/isolate.h"
+#include "src/v8threads.h"

 namespace v8 {
 namespace internal {
@@ -39,13 +40,12 @@ void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
 }  // namespace


-class OptimizingCompileDispatcher::CompileTask : public v8::Task {
+class OptimizingCompilerThread::CompileTask : public v8::Task {
  public:
   explicit CompileTask(Isolate* isolate) : isolate_(isolate) {
-    OptimizingCompileDispatcher* dispatcher =
-        isolate_->optimizing_compile_dispatcher();
-    base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
-    ++dispatcher->ref_count_;
+    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
+    base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
+    ++thread->ref_count_;
   }

   virtual ~CompileTask() {}
@@ -57,21 +57,20 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
     DisallowHandleAllocation no_handles;
     DisallowHandleDereference no_deref;

-    OptimizingCompileDispatcher* dispatcher =
-        isolate_->optimizing_compile_dispatcher();
+    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
     {
       TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);

-      if (dispatcher->recompilation_delay_ != 0) {
-        base::OS::Sleep(dispatcher->recompilation_delay_);
+      if (thread->recompilation_delay_ != 0) {
+        base::OS::Sleep(thread->recompilation_delay_);
       }

-      dispatcher->CompileNext(dispatcher->NextInput(true));
+      thread->CompileNext(thread->NextInput(true));
     }
     {
-      base::LockGuard<base::Mutex> lock_guard(&dispatcher->ref_count_mutex_);
-      if (--dispatcher->ref_count_ == 0) {
-        dispatcher->ref_count_zero_.NotifyOne();
+      base::LockGuard<base::Mutex> lock_guard(&thread->ref_count_mutex_);
+      if (--thread->ref_count_ == 0) {
+        thread->ref_count_zero_.NotifyOne();
       }
     }
   }
@@ -82,7 +81,7 @@ class OptimizingCompileDispatcher::CompileTask : public v8::Task {
 };


-OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
+OptimizingCompilerThread::~OptimizingCompilerThread() {
 #ifdef DEBUG
   {
     base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
@@ -102,7 +101,65 @@ OptimizingCompileDispatcher::~OptimizingCompileDispatcher() {
 }


-OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
+void OptimizingCompilerThread::Run() {
+#ifdef DEBUG
+  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
+    thread_id_ = ThreadId::Current().ToInteger();
+  }
+#endif
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+
+  if (job_based_recompilation_) {
+    return;
+  }
+
+  base::ElapsedTimer total_timer;
+  if (tracing_enabled_) total_timer.Start();
+
+  while (true) {
+    input_queue_semaphore_.Wait();
+    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
+
+    if (recompilation_delay_ != 0) {
+      base::OS::Sleep(recompilation_delay_);
+    }
+
+    switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
+      case CONTINUE:
+        break;
+      case STOP:
+        if (tracing_enabled_) {
+          time_spent_total_ = total_timer.Elapsed();
+        }
+        stop_semaphore_.Signal();
+        return;
+      case FLUSH:
+        // The main thread is blocked, waiting for the stop semaphore.
+        { AllowHandleDereference allow_handle_dereference;
+          FlushInputQueue(true);
+        }
+        base::Release_Store(&stop_thread_,
+                            static_cast<base::AtomicWord>(CONTINUE));
+        stop_semaphore_.Signal();
+        // Return to start of consumer loop.
+        continue;
+    }
+
+    base::ElapsedTimer compiling_timer;
+    if (tracing_enabled_) compiling_timer.Start();
+
+    CompileNext(NextInput());
+
+    if (tracing_enabled_) {
+      time_spent_compiling_ += compiling_timer.Elapsed();
+    }
+  }
+}
+
+
+OptimizedCompileJob* OptimizingCompilerThread::NextInput(
     bool check_if_flushing) {
   base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
   if (input_queue_length_ == 0) return NULL;
@@ -111,7 +168,7 @@ OptimizedCompileJob* OptimizingCompileDispatcher::NextInput(
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
   if (check_if_flushing) {
-    if (static_cast<ModeFlag>(base::Acquire_Load(&mode_)) == FLUSH) {
+    if (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_)) != CONTINUE) {
       if (!job->info()->is_osr()) {
         AllowHandleDereference allow_handle_dereference;
         DisposeOptimizedCompileJob(job, true);
|
||||
}
|
||||
|
||||
|
||||
void OptimizingCompileDispatcher::CompileNext(OptimizedCompileJob* job) {
|
||||
void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
|
||||
if (!job) return;
|
||||
|
||||
// The function may have already been optimized by OSR. Simply continue.
|
||||
OptimizedCompileJob::Status status = job->OptimizeGraph();
|
||||
USE(status); // Prevent an unused-variable error in release mode.
|
||||
USE(status); // Prevent an unused-variable error in release mode.
|
||||
DCHECK(status != OptimizedCompileJob::FAILED);
|
||||
|
||||
// The function may have already been optimized by OSR. Simply continue.
|
||||
// Use a mutex to make sure that functions marked for install
|
||||
// are always also queued.
|
||||
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
|
||||
output_queue_.push(job);
|
||||
if (job_based_recompilation_) output_queue_mutex_.Lock();
|
||||
output_queue_.Enqueue(job);
|
||||
if (job_based_recompilation_) output_queue_mutex_.Unlock();
|
||||
isolate_->stack_guard()->RequestInstallCode();
|
||||
}
|
||||
|
||||
|
||||
void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
|
||||
base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
|
||||
while (!output_queue_.empty()) {
|
||||
OptimizedCompileJob* job = output_queue_.front();
|
||||
output_queue_.pop();
|
||||
|
||||
void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
|
||||
OptimizedCompileJob* job;
|
||||
while ((job = NextInput())) {
|
||||
DCHECK(!job_based_recompilation_);
|
||||
// This should not block, since we have one signal on the input queue
|
||||
// semaphore corresponding to each element in the input queue.
|
||||
input_queue_semaphore_.Wait();
|
||||
// OSR jobs are dealt with separately.
|
||||
if (!job->info()->is_osr()) {
|
||||
DisposeOptimizedCompileJob(job, restore_function_code);
|
||||
@@ -154,7 +213,18 @@ void OptimizingCompileDispatcher::FlushOutputQueue(bool restore_function_code) {
 }


-void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
+void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+  OptimizedCompileJob* job;
+  while (output_queue_.Dequeue(&job)) {
+    // OSR jobs are dealt with separately.
+    if (!job->info()->is_osr()) {
+      DisposeOptimizedCompileJob(job, restore_function_code);
+    }
+  }
+}
+
+
+void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     if (osr_buffer_[i] != NULL) {
       DisposeOptimizedCompileJob(osr_buffer_[i], restore_function_code);
@@ -164,29 +234,37 @@ void OptimizingCompileDispatcher::FlushOsrBuffer(bool restore_function_code) {
 }


-void OptimizingCompileDispatcher::Flush() {
-  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
+void OptimizingCompilerThread::Flush() {
+  DCHECK(!IsOptimizerThread());
+  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
   if (FLAG_block_concurrent_recompilation) Unblock();
-  {
+  if (!job_based_recompilation_) {
+    input_queue_semaphore_.Signal();
+    stop_semaphore_.Wait();
+  } else {
     base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
     while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
-    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
   }
   FlushOutputQueue(true);
   if (FLAG_concurrent_osr) FlushOsrBuffer(true);
-  if (FLAG_trace_concurrent_recompilation) {
+  if (tracing_enabled_) {
     PrintF("  ** Flushed concurrent recompilation queues.\n");
   }
 }


-void OptimizingCompileDispatcher::Stop() {
-  base::Release_Store(&mode_, static_cast<base::AtomicWord>(FLUSH));
+void OptimizingCompilerThread::Stop() {
+  DCHECK(!IsOptimizerThread());
+  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
   if (FLAG_block_concurrent_recompilation) Unblock();
-  {
+  if (!job_based_recompilation_) {
+    input_queue_semaphore_.Signal();
+    stop_semaphore_.Wait();
+  } else {
     base::LockGuard<base::Mutex> lock_guard(&ref_count_mutex_);
     while (ref_count_ > 0) ref_count_zero_.Wait(&ref_count_mutex_);
-    base::Release_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(CONTINUE));
   }

   if (recompilation_delay_ != 0) {
@@ -195,25 +273,32 @@ void OptimizingCompileDispatcher::Stop() {
     while (input_queue_length_ > 0) CompileNext(NextInput());
     InstallOptimizedFunctions();
   } else {
+    FlushInputQueue(false);
     FlushOutputQueue(false);
   }

   if (FLAG_concurrent_osr) FlushOsrBuffer(false);

-  if ((FLAG_trace_osr || FLAG_trace_concurrent_recompilation) &&
-      FLAG_concurrent_osr) {
+  if (tracing_enabled_) {
+    double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
+    if (job_based_recompilation_) percentage = 100.0;
+    PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
+  }
+
+  if ((FLAG_trace_osr || tracing_enabled_) && FLAG_concurrent_osr) {
     PrintF("[COSR hit rate %d / %d]\n", osr_hits_, osr_attempts_);
   }
+
+  Join();
 }


-void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
+void OptimizingCompilerThread::InstallOptimizedFunctions() {
+  DCHECK(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);

-  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
-  while (!output_queue_.empty()) {
-    OptimizedCompileJob* job = output_queue_.front();
-    output_queue_.pop();
+  OptimizedCompileJob* job;
+  while (output_queue_.Dequeue(&job)) {
     CompilationInfo* info = job->info();
     Handle<JSFunction> function(*info->closure());
     if (info->is_osr()) {
@@ -230,7 +315,7 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
       BackEdgeTable::RemoveStackCheck(code, offset);
     } else {
       if (function->IsOptimized()) {
-        if (FLAG_trace_concurrent_recompilation) {
+        if (tracing_enabled_) {
          PrintF("  ** Aborting compilation for ");
          function->ShortPrint();
          PrintF(" as it has already been optimized.\n");
@@ -238,17 +323,17 @@ void OptimizingCompileDispatcher::InstallOptimizedFunctions() {
        DisposeOptimizedCompileJob(job, false);
       } else {
         Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
-        function->ReplaceCode(code.is_null() ? function->shared()->code()
-                                             : *code);
+        function->ReplaceCode(
+            code.is_null() ? function->shared()->code() : *code);
       }
     }
   }
 }


-void OptimizingCompileDispatcher::QueueForOptimization(
-    OptimizedCompileJob* job) {
+void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
   DCHECK(IsQueueAvailable());
+  DCHECK(!IsOptimizerThread());
   CompilationInfo* info = job->info();
   if (info->is_osr()) {
     osr_attempts_++;
@@ -269,27 +354,36 @@ void OptimizingCompileDispatcher::QueueForOptimization(
   }
   if (FLAG_block_concurrent_recompilation) {
     blocked_jobs_++;
-  } else {
+  } else if (job_based_recompilation_) {
     V8::GetCurrentPlatform()->CallOnBackgroundThread(
         new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+  } else {
+    input_queue_semaphore_.Signal();
   }
 }


-void OptimizingCompileDispatcher::Unblock() {
+void OptimizingCompilerThread::Unblock() {
+  DCHECK(!IsOptimizerThread());
   while (blocked_jobs_ > 0) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+    if (job_based_recompilation_) {
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+    } else {
+      input_queue_semaphore_.Signal();
+    }
     blocked_jobs_--;
   }
 }


-OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
+OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
     Handle<JSFunction> function, BailoutId osr_ast_id) {
+  DCHECK(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     OptimizedCompileJob* current = osr_buffer_[i];
-    if (current != NULL && current->IsWaitingForInstall() &&
+    if (current != NULL &&
+        current->IsWaitingForInstall() &&
         current->info()->HasSameOsrEntry(function, osr_ast_id)) {
       osr_hits_++;
       osr_buffer_[i] = NULL;
@@ -300,8 +394,9 @@ OptimizedCompileJob* OptimizingCompileDispatcher::FindReadyOSRCandidate(
 }


-bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
-                                                 BailoutId osr_ast_id) {
+bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
+                                              BailoutId osr_ast_id) {
+  DCHECK(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL &&
@@ -313,7 +408,8 @@ bool OptimizingCompileDispatcher::IsQueuedForOSR(Handle<JSFunction> function,
 }


-bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
+bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
+  DCHECK(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL && *current->info()->closure() == function) {
@@ -324,7 +420,8 @@ bool OptimizingCompileDispatcher::IsQueuedForOSR(JSFunction* function) {
 }


-void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
+void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
+  DCHECK(!IsOptimizerThread());
   // Find the next slot that is empty or has a stale job.
   OptimizedCompileJob* stale = NULL;
   while (true) {
@@ -347,5 +444,20 @@ void OptimizingCompileDispatcher::AddToOsrBuffer(OptimizedCompileJob* job) {
   osr_buffer_[osr_buffer_cursor_] = job;
   osr_buffer_cursor_ = (osr_buffer_cursor_ + 1) % osr_buffer_capacity_;
 }


+#ifdef DEBUG
+bool OptimizingCompilerThread::IsOptimizerThread(Isolate* isolate) {
+  return isolate->concurrent_recompilation_enabled() &&
+         isolate->optimizing_compiler_thread()->IsOptimizerThread();
+}
-}  // namespace v8::internal
+
+
+bool OptimizingCompilerThread::IsOptimizerThread() {
+  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
+  return ThreadId::Current().ToInteger() == thread_id_;
+}
+#endif
+
+
+} }  // namespace v8::internal
@@ -2,17 +2,17 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

-#ifndef V8_OPTIMIZING_COMPILE_DISPATCHER_H_
-#define V8_OPTIMIZING_COMPILE_DISPATCHER_H_
-
-#include <queue>
+#ifndef V8_OPTIMIZING_COMPILER_THREAD_H_
+#define V8_OPTIMIZING_COMPILER_THREAD_H_

+#include "src/base/atomicops.h"
 #include "src/base/platform/condition-variable.h"
 #include "src/base/platform/mutex.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
 #include "src/flags.h"
 #include "src/list.h"
+#include "src/unbound-queue-inl.h"

 namespace v8 {
 namespace internal {
@@ -21,10 +21,16 @@ class HOptimizedGraphBuilder;
 class OptimizedCompileJob;
 class SharedFunctionInfo;

-class OptimizingCompileDispatcher {
+class OptimizingCompilerThread : public base::Thread {
  public:
-  explicit OptimizingCompileDispatcher(Isolate* isolate)
-      : isolate_(isolate),
+  explicit OptimizingCompilerThread(Isolate* isolate)
+      : Thread(Options("OptimizingCompilerThread")),
+#ifdef DEBUG
+        thread_id_(0),
+#endif
+        isolate_(isolate),
+        stop_semaphore_(0),
+        input_queue_semaphore_(0),
         input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
         input_queue_length_(0),
         input_queue_shift_(0),
@@ -34,8 +40,11 @@ class OptimizingCompileDispatcher {
         osr_attempts_(0),
         blocked_jobs_(0),
         ref_count_(0),
+        tracing_enabled_(FLAG_trace_concurrent_recompilation),
+        job_based_recompilation_(FLAG_job_based_recompilation),
         recompilation_delay_(FLAG_concurrent_recompilation_delay) {
-    base::NoBarrier_Store(&mode_, static_cast<base::AtomicWord>(COMPILE));
+    base::NoBarrier_Store(&stop_thread_,
+                          static_cast<base::AtomicWord>(CONTINUE));
     input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
     if (FLAG_concurrent_osr) {
       // Allocate and mark OSR buffer slots as empty.
@@ -44,7 +53,7 @@ class OptimizingCompileDispatcher {
     }
   }

-  ~OptimizingCompileDispatcher();
+  ~OptimizingCompilerThread();

+  void Run();
   void Stop();
@@ -74,11 +83,17 @@ class OptimizingCompileDispatcher {
     return (FLAG_concurrent_recompilation && max_available > 1);
   }

+#ifdef DEBUG
+  static bool IsOptimizerThread(Isolate* isolate);
+  bool IsOptimizerThread();
+#endif
+
  private:
   class CompileTask;

-  enum ModeFlag { COMPILE, FLUSH };
+  enum StopFlag { CONTINUE, STOP, FLUSH };

+  void FlushInputQueue(bool restore_function_code);
   void FlushOutputQueue(bool restore_function_code);
   void FlushOsrBuffer(bool restore_function_code);
   void CompileNext(OptimizedCompileJob* job);
@@ -95,7 +110,14 @@ class OptimizingCompileDispatcher {
     return result;
   }

+#ifdef DEBUG
+  int thread_id_;
+  base::Mutex thread_id_mutex_;
+#endif
+
   Isolate* isolate_;
+  base::Semaphore stop_semaphore_;
+  base::Semaphore input_queue_semaphore_;

   // Circular queue of incoming recompilation tasks (including OSR).
   OptimizedCompileJob** input_queue_;
@@ -105,7 +127,7 @@ class OptimizingCompileDispatcher {
   base::Mutex input_queue_mutex_;

   // Queue of recompilation tasks ready to be installed (excluding OSR).
-  std::queue<OptimizedCompileJob*> output_queue_;
+  UnboundQueue<OptimizedCompileJob*> output_queue_;
   // Used for job based recompilation which has multiple producers on
   // different threads.
   base::Mutex output_queue_mutex_;
@@ -115,7 +137,9 @@ class OptimizingCompileDispatcher {
   int osr_buffer_capacity_;
   int osr_buffer_cursor_;

-  volatile base::AtomicWord mode_;
+  volatile base::AtomicWord stop_thread_;
+  base::TimeDelta time_spent_compiling_;
+  base::TimeDelta time_spent_total_;

   int osr_hits_;
   int osr_attempts_;
@@ -126,14 +150,17 @@ class OptimizingCompileDispatcher {
   base::Mutex ref_count_mutex_;
   base::ConditionVariable ref_count_zero_;

-  // Copy of FLAG_concurrent_recompilation_delay that will be used from the
-  // background thread.
+  // Copies of FLAG_trace_concurrent_recompilation,
+  // FLAG_concurrent_recompilation_delay and
+  // FLAG_job_based_recompilation that will be used from the background thread.
   //
   // Since flags might get modified while the background thread is running, it
   // is not safe to access them directly.
+  bool tracing_enabled_;
+  bool job_based_recompilation_;
   int recompilation_delay_;
 };
-}
-}  // namespace v8::internal
-
-#endif  // V8_OPTIMIZING_COMPILE_DISPATCHER_H_
+
+} }  // namespace v8::internal
+
+#endif  // V8_OPTIMIZING_COMPILER_THREAD_H_
@@ -232,9 +232,8 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
     // Gate the OSR entry with a stack check.
     BackEdgeTable::AddStackCheck(caller_code, pc_offset);
     // Poll already queued compilation jobs.
-    OptimizingCompileDispatcher* dispatcher =
-        isolate->optimizing_compile_dispatcher();
-    if (dispatcher->IsQueuedForOSR(function, ast_id)) {
+    OptimizingCompilerThread* thread = isolate->optimizing_compiler_thread();
+    if (thread->IsQueuedForOSR(function, ast_id)) {
       if (FLAG_trace_osr) {
         PrintF("[OSR - Still waiting for queued: ");
         function->PrintName();
@@ -243,7 +242,7 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
       return NULL;
     }

-    job = dispatcher->FindReadyOSRCandidate(function, ast_id);
+    job = thread->FindReadyOSRCandidate(function, ast_id);
   }

   if (job != NULL) {
@@ -325,7 +324,7 @@ RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
     return isolate->StackOverflow();
   }

-  isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+  isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
   return (function->IsOptimized()) ? function->code()
                                    : function->shared()->code();
 }
@@ -175,7 +175,7 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
   if (isolate->concurrent_recompilation_enabled() &&
       sync_with_compiler_thread) {
     while (function->IsInOptimizationQueue()) {
-      isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+      isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
       base::OS::Sleep(50);
     }
   }
@@ -200,7 +200,7 @@ RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
   DCHECK(args.length() == 0);
   RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
   RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
-  isolate->optimizing_compile_dispatcher()->Unblock();
+  isolate->optimizing_compiler_thread()->Unblock();
   return isolate->heap()->undefined_value();
 }

@@ -780,8 +780,8 @@
         '../../src/objects-printer.cc',
         '../../src/objects.cc',
         '../../src/objects.h',
-        '../../src/optimizing-compile-dispatcher.cc',
-        '../../src/optimizing-compile-dispatcher.h',
+        '../../src/optimizing-compiler-thread.cc',
+        '../../src/optimizing-compiler-thread.h',
        '../../src/ostreams.cc',
        '../../src/ostreams.h',
        '../../src/parser.cc',