Details wrt parallel recompilation.
This includes:
- actually release handles kept by compilation info when compilation completes.
- do not use parallel recompilation on single core CPUs.
- artificially delay parallel recompilation for debugging.
- fix outdated assertions wrt optimization status.
- add "parallel" option to %OptimizeFunctionOnNextCall.

R=jkummerow@chromium.org
BUG=

Review URL: https://chromiumcodereview.appspot.com/12442002

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@13827 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
commit 03375a68d7 (parent 24abac9f02)
src/compiler.cc
@@ -963,6 +963,17 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
 void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
+  SmartPointer<CompilationInfo> info(optimizing_compiler->info());
+  // Function may have been optimized meanwhile by OSR.
+  if (FLAG_use_osr) {
+    // Function may have already been optimized meanwhile by OSR.
+    if (!info->code().is_null() &&
+        info->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+      return;
+    }
+    // OSR may also have caused optimization to be disabled.
+    if (info->shared_info()->optimization_disabled()) return;
+  }
 
   Isolate* isolate = info->isolate();
   VMState state(isolate, PARALLEL_COMPILER);
   Logger::TimerEventScope timer(
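The point of this hunk is that InstallOptimizedCode now takes ownership of the CompilationInfo in a SmartPointer, so the info (and the deferred handles it keeps alive) is released on every exit path, including the early OSR bail-outs. A minimal standalone sketch of the same ownership pattern, using std::unique_ptr and stand-in types rather than V8's:

#include <memory>

struct CompilationJob {            // stand-in for V8's CompilationInfo
  bool already_optimized = false;  // set when OSR beat us to it
};

// Taking ownership at function entry guarantees the job is destroyed on
// every return path, early bail-outs included.
void Install(CompilationJob* raw) {
  std::unique_ptr<CompilationJob> job(raw);
  if (job->already_optimized) return;  // early return still frees the job
  // ... install the generated code ...
}                                      // normal exit frees the job too

int main() {
  Install(new CompilationJob());
  return 0;
}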
src/compiler.h
@@ -49,7 +49,7 @@ class CompilationInfo {
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
   CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate, Zone* zone);
 
-  ~CompilationInfo();
+  virtual ~CompilationInfo();
 
   Isolate* isolate() {
     ASSERT(Isolate::Current() == isolate_);
@@ -349,8 +349,6 @@ class CompilationInfo {
 // Zone on construction and deallocates it on exit.
 class CompilationInfoWithZone: public CompilationInfo {
  public:
-  INLINE(void* operator new(size_t size)) { return Malloced::New(size); }
-
   explicit CompilationInfoWithZone(Handle<Script> script)
       : CompilationInfo(script, &zone_),
         zone_(script->GetIsolate()),
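A virtual destructor matters here because the object handed to SmartPointer<CompilationInfo> is actually a CompilationInfoWithZone: deleting a derived object through a base pointer without a virtual destructor is undefined behavior and skips the derived cleanup. A minimal sketch of that rule, with stand-in types (not V8's):

#include <cstdio>

struct Base {
  virtual ~Base() { std::puts("~Base"); }  // 'virtual' is the crucial part
};

struct Derived : Base {
  ~Derived() { std::puts("~Derived (zone freed here)"); }
};

int main() {
  Base* p = new Derived();
  delete p;  // runs ~Derived then ~Base; without 'virtual' this is UB
  return 0;
}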
src/flag-definitions.h
@@ -248,6 +248,8 @@ DEFINE_bool(parallel_recompilation, false,
 DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
 DEFINE_int(parallel_recompilation_queue_length, 2,
            "the length of the parallel compilation queue")
+DEFINE_int(parallel_recompilation_delay, 0,
+           "artificial compilation delay in ms")
 DEFINE_bool(manual_parallel_recompilation, false,
             "disable automatic optimization")
 DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
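For context, DEFINE_implication(a, b) forces flag b on whenever flag a is set, so --manual-parallel-recompilation implies --parallel-recompilation. A rough sketch of that post-parse fix-up, with stand-in globals rather than V8's flag machinery:

#include <cstdio>

bool FLAG_manual_parallel_recompilation = true;   // as if passed on the CLI
bool FLAG_parallel_recompilation = false;

// Equivalent of DEFINE_implication(manual_parallel_recompilation,
// parallel_recompilation): run once after flag parsing.
void ComputeFlagImplications() {
  if (FLAG_manual_parallel_recompilation) FLAG_parallel_recompilation = true;
}

int main() {
  ComputeFlagImplications();
  std::printf("parallel_recompilation = %d\n", FLAG_parallel_recompilation);
  return 0;
}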
src/isolate.cc
@@ -1772,6 +1772,8 @@ void Isolate::Deinit() {
   if (state_ == INITIALIZED) {
     TRACE_ISOLATE(deinit);
 
+    if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
+
     if (FLAG_sweeper_threads > 0) {
       for (int i = 0; i < FLAG_sweeper_threads; i++) {
         sweeper_thread_[i]->Stop();
@@ -1788,8 +1790,6 @@ void Isolate::Deinit() {
       delete[] marking_thread_;
     }
 
-    if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Stop();
-
     if (FLAG_hydrogen_stats) HStatistics::Instance()->Print();
 
     // We must stop the logger before we tear down other components.
@@ -2198,6 +2198,11 @@ bool Isolate::Init(Deserializer* des) {
     FLAG_concurrent_sweeping = false;
     FLAG_parallel_sweeping = false;
   }
+  if (FLAG_parallel_recompilation &&
+      SystemThreadManager::NumberOfParallelSystemThreads(
+          SystemThreadManager::PARALLEL_RECOMPILATION) == 0) {
+    FLAG_parallel_recompilation = false;
+  }
   return true;
 }
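The Init() hunk implements the "no parallel recompilation on single core CPUs" item: if the thread manager reports no spare thread for recompilation, the flag is switched off. A portable sketch of the same guard, using std::thread::hardware_concurrency() in place of V8's SystemThreadManager:

#include <thread>

bool parallel_recompilation = true;

// On a single-core machine a background compiler thread only adds
// context-switch overhead, so fall back to synchronous recompilation.
void DisableParallelRecompilationOnSingleCore() {
  // hardware_concurrency() may return 0 when unknown; treating that as
  // single-core errs on the conservative side.
  if (std::thread::hardware_concurrency() <= 1) {
    parallel_recompilation = false;
  }
}

int main() {
  DisableParallelRecompilationOnSingleCore();
  return 0;
}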
src/isolate.h
@@ -292,7 +292,8 @@ class SystemThreadManager {
   enum ParallelSystemComponent {
     PARALLEL_SWEEPING,
     CONCURRENT_SWEEPING,
-    PARALLEL_MARKING
+    PARALLEL_MARKING,
+    PARALLEL_RECOMPILATION
   };
 
   static int NumberOfParallelSystemThreads(ParallelSystemComponent type);
src/optimizing-compiler-thread.cc
@@ -50,6 +50,11 @@ void OptimizingCompilerThread::Run() {
     input_queue_semaphore_->Wait();
     Logger::TimerEventScope timer(
         isolate_, Logger::TimerEventScope::v8_recompile_parallel);
+
+    if (FLAG_parallel_recompilation_delay != 0) {
+      OS::Sleep(FLAG_parallel_recompilation_delay);
+    }
+
     if (Acquire_Load(&stop_thread_)) {
       stop_semaphore_->Signal();
       if (FLAG_trace_parallel_recompilation) {
@@ -61,19 +66,8 @@ void OptimizingCompilerThread::Run() {
     int64_t compiling_start = 0;
     if (FLAG_trace_parallel_recompilation) compiling_start = OS::Ticks();
 
-    Heap::RelocationLock relocation_lock(isolate_->heap());
-    OptimizingCompiler* optimizing_compiler = NULL;
-    input_queue_.Dequeue(&optimizing_compiler);
-    Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+    CompileNext();
 
-    ASSERT(!optimizing_compiler->info()->closure()->IsOptimized());
-
-    OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
-    ASSERT(status != OptimizingCompiler::FAILED);
-    // Prevent an unused-variable error in release mode.
-    USE(status);
-
-    output_queue_.Enqueue(optimizing_compiler);
     if (!FLAG_manual_parallel_recompilation) {
       isolate_->stack_guard()->RequestCodeReadyEvent();
     } else {
@@ -89,11 +83,46 @@ void OptimizingCompilerThread::Run() {
 }
 
 
+void OptimizingCompilerThread::CompileNext() {
+  Heap::RelocationLock relocation_lock(isolate_->heap());
+  OptimizingCompiler* optimizing_compiler = NULL;
+  input_queue_.Dequeue(&optimizing_compiler);
+  Barrier_AtomicIncrement(&queue_length_, static_cast<Atomic32>(-1));
+
+  // Function may have been optimized meanwhile by OSR.
+  if (FLAG_use_osr &&
+      optimizing_compiler->info()->closure()->IsOptimized()) {
+    return;
+  }
+
+  OptimizingCompiler::Status status = optimizing_compiler->OptimizeGraph();
+  ASSERT(status != OptimizingCompiler::FAILED);
+  // Prevent an unused-variable error in release mode.
+  USE(status);
+
+  output_queue_.Enqueue(optimizing_compiler);
+}
+
+
 void OptimizingCompilerThread::Stop() {
   Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
   input_queue_semaphore_->Signal();
   stop_semaphore_->Wait();
+
+  if (FLAG_parallel_recompilation_delay != 0) {
+    // Execution ended before we managed to compile and install the remaining
+    // functions in the queue. We still want to do that for debugging though.
+    // At this point the optimizing thread already stopped, so we finish
+    // processing the queue in the main thread.
+    InstallOptimizedFunctions();
+    // Barrier when loading queue length is not necessary since the write
+    // happens in CompileNext on the same thread.
+    while (NoBarrier_Load(&queue_length_) > 0) {
+      CompileNext();
+      InstallOptimizedFunctions();
+    }
+  }
+
   if (FLAG_trace_parallel_recompilation) {
     double compile_time = static_cast<double>(time_spent_compiling_);
     double total_time = static_cast<double>(time_spent_total_);
@@ -123,11 +152,13 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
 
 Handle<SharedFunctionInfo>
     OptimizingCompilerThread::InstallNextOptimizedFunction() {
-  ASSERT(FLAG_manual_parallel_recompilation);
+  ASSERT(FLAG_manual_parallel_recompilation ||
+         FLAG_parallel_recompilation_delay != 0);
   output_queue_semaphore_->Wait();
   OptimizingCompiler* compiler = NULL;
   output_queue_.Dequeue(&compiler);
-  Handle<SharedFunctionInfo> shared = compiler->info()->shared_info();
+  // Copy a handle from deferred handle scope to the normal handle scope.
+  Handle<SharedFunctionInfo> shared(*compiler->info()->shared_info());
   Compiler::InstallOptimizedCode(compiler);
   return shared;
 }
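Stop() now drains the queues on the main thread when an artificial delay is configured, so functions queued for debugging still get compiled and installed after the worker has been joined. A self-contained sketch of that shutdown pattern with stand-in queues (single-threaded, matching the "worker already stopped" assumption that makes unsynchronized access safe):

#include <cstdio>
#include <queue>

std::queue<int> input_queue;   // jobs waiting to be compiled
std::queue<int> output_queue;  // jobs compiled, waiting to be installed

void CompileNextJob() {
  int job = input_queue.front();
  input_queue.pop();
  output_queue.push(job);  // "compile" it
}

void InstallCompiledJobs() {
  while (!output_queue.empty()) {
    std::printf("installing job %d\n", output_queue.front());
    output_queue.pop();
  }
}

// Mirror of the Stop() tail above: install what is already compiled, then
// alternate compile/install until the input queue is empty.
void DrainOnShutdown() {
  InstallCompiledJobs();
  while (!input_queue.empty()) {
    CompileNextJob();
    InstallCompiledJobs();
  }
}

int main() {
  input_queue.push(1);
  input_queue.push(2);
  DrainOnShutdown();
  return 0;
}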
src/optimizing-compiler-thread.h
@@ -59,6 +59,7 @@ class OptimizingCompilerThread : public Thread {
 
   void Run();
   void Stop();
+  void CompileNext();
   void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
   void InstallOptimizedFunctions();
src/runtime.cc
@@ -7682,31 +7682,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyCompile) {
 }
 
 
-RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  Handle<JSFunction> function = args.at<JSFunction>(0);
-
+bool AllowOptimization(Isolate* isolate, Handle<JSFunction> function) {
   // If the function is not compiled ignore the lazy
   // recompilation. This can happen if the debugger is activated and
   // the function is returned to the not compiled state.
-  if (!function->shared()->is_compiled()) {
-    function->ReplaceCode(function->shared()->code());
-    return function->code();
-  }
+  if (!function->shared()->is_compiled()) return false;
 
   // If the function is not optimizable or debugger is active continue using the
   // code from the full compiler.
   if (!FLAG_crankshaft ||
       !function->shared()->code()->optimizable() ||
+      function->shared()->optimization_disabled() ||
       isolate->DebuggerHasBreakPoints()) {
     if (FLAG_trace_opt) {
       PrintF("[failed to optimize ");
       function->PrintName();
       PrintF(": is code optimizable: %s, is debugger enabled: %s]\n",
-             function->shared()->code()->optimizable() ? "T" : "F",
+             function->shared()->optimization_disabled() ? "F" : "T",
              isolate->DebuggerHasBreakPoints() ? "T" : "F");
     }
     return false;
   }
+  return true;
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
+  HandleScope scope(isolate);
+  ASSERT(args.length() == 1);
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+
+  if (!AllowOptimization(isolate, function)) {
+    function->ReplaceCode(function->shared()->code());
+    return function->code();
+  }
@@ -7728,8 +7733,14 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_LazyRecompile) {
 
 RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
   HandleScope handle_scope(isolate);
+  Handle<JSFunction> function = args.at<JSFunction>(0);
+  if (!AllowOptimization(isolate, function)) {
+    function->ReplaceCode(function->shared()->code());
+    return function->code();
+  }
+  function->shared()->code()->set_profiler_ticks(0);
   ASSERT(FLAG_parallel_recompilation);
-  Compiler::RecompileParallel(args.at<JSFunction>(0));
+  Compiler::RecompileParallel(function);
   return isolate->heap()->undefined_value();
 }
@@ -7913,10 +7924,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   if (args.length() == 2 &&
       unoptimized->kind() == Code::FUNCTION) {
     CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
-    CHECK(type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")));
+    if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
       isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
       unoptimized->set_allow_osr_at_loop_nesting_level(
           Code::kMaxLoopNestingMarker);
+    } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("parallel"))) {
+      function->MarkForParallelRecompilation();
+    }
   }
 
   return isolate->heap()->undefined_value();
src/smart-pointers.h
@@ -129,11 +129,12 @@ class SmartArrayPointer: public SmartPointerBase<ArrayDeallocator<T>, T> {
 
 template<typename T>
 struct ObjectDeallocator {
-  static void Delete(T* array) {
-    Malloced::Delete(array);
+  static void Delete(T* object) {
+    delete object;
   }
 };
 
 
 template<typename T>
 class SmartPointer: public SmartPointerBase<ObjectDeallocator<T>, T> {
  public:
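This one-line change is what actually releases the handles: Malloced::Delete frees the memory like free() but never runs the destructor, while delete runs ~CompilationInfo (which drops the deferred handles) before freeing. A standalone demonstration of the difference, with a stand-in type:

#include <cstdio>
#include <cstdlib>
#include <new>

struct Holder {
  ~Holder() { std::puts("handles released"); }
};

int main() {
  // Old behavior: free()-style deallocation skips the destructor,
  // so "handles released" is never printed and the resources leak.
  Holder* a = static_cast<Holder*>(std::malloc(sizeof(Holder)));
  new (a) Holder();
  std::free(a);

  // New behavior: delete runs the destructor, then frees the memory.
  Holder* b = new Holder();
  delete b;  // prints "handles released"
  return 0;
}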
test/mjsunit/parallel-optimize-disabled.js (new file, 46 lines)
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --nodead-code-elimination --parallel-recompilation
+// Flags: --allow-natives-syntax
+
+function g() { // g() cannot be optimized.
+  const x = 1;
+  x++;
+}
+
+function f(x) {
+  g();
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(g, "parallel");
+%OptimizeFunctionOnNextCall(f);
+f(0);  // g() is disabled for optimization on inlining attempt.
+g();   // Attempt to optimize g() should not run into any assertion.