Introduce helper functions to test parallel recompilation.

BUG=

Review URL: https://chromiumcodereview.appspot.com/11419012

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@12986 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
yangguo@chromium.org 2012-11-16 10:57:50 +00:00
parent 7f824867f6
commit 63f109aaa5
8 changed files with 151 additions and 6 deletions
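
For orientation: with the new --manual-parallel-recompilation flag, functions are no longer optimized or installed automatically; a test drives both steps explicitly through the new %ForceParallelRecompile and %InstallRecompiledCode runtime functions. A minimal sketch of that workflow, distilled from the mjsunit test added at the end of this commit (the function f is just a placeholder; run with --allow-natives-syntax --parallel-recompilation --manual-parallel-recompilation):

function f(x) { return x * x; }
f(1); f(2);                   // Collect type feedback while running unoptimized.
%ForceParallelRecompile(f);   // Queue f for optimization on the compiler thread.
// f keeps running unoptimized; in manual mode nothing is installed automatically.
%InstallRecompiledCode(f);    // Block until f's optimized code has been installed.

Passing a non-function argument to %InstallRecompiledCode (as the test does with "the rest") installs every remaining function in the output queue.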

src/execution.cc

@@ -937,7 +937,8 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
     }
     stack_guard->Continue(CODE_READY);
   }
-  if (!stack_guard->IsTerminateExecution()) {
+  if (!stack_guard->IsTerminateExecution() &&
+      !FLAG_manual_parallel_recompilation) {
     isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
   }

src/flag-definitions.h

@@ -225,7 +225,7 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
 DEFINE_bool(optimize_for_in, true,
             "optimize functions containing for-in loops")
 DEFINE_bool(opt_safe_uint32_operations, true,
-            "allow uint32 values on optimize frames if they are used only in"
+            "allow uint32 values on optimize frames if they are used only in "
             "safe operations")
 DEFINE_bool(parallel_recompilation, false,
@@ -233,6 +233,9 @@ DEFINE_bool(parallel_recompilation, false,
 DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
 DEFINE_int(parallel_recompilation_queue_length, 2,
            "the length of the parallel compilation queue")
+DEFINE_bool(manual_parallel_recompilation, false,
+            "disable automatic optimization")
+DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
 
 // Experimental profiler changes.
 DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")

src/optimizing-compiler-thread.cc

@@ -72,7 +72,13 @@ void OptimizingCompilerThread::Run() {
     USE(status);
     output_queue_.Enqueue(optimizing_compiler);
-    isolate_->stack_guard()->RequestCodeReadyEvent();
+    if (!FLAG_manual_parallel_recompilation) {
+      isolate_->stack_guard()->RequestCodeReadyEvent();
+    } else {
+      // In manual mode, do not trigger a code ready event.
+      // Instead, wait for the optimized functions to be installed manually.
+      output_queue_semaphore_->Signal();
+    }
 
     if (FLAG_trace_parallel_recompilation) {
       time_spent_compiling_ += OS::Ticks() - compiling_start;
@@ -99,6 +105,9 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
   HandleScope handle_scope(isolate_);
   int functions_installed = 0;
   while (!output_queue_.IsEmpty()) {
+    if (FLAG_manual_parallel_recompilation) {
+      output_queue_semaphore_->Wait();
+    }
     OptimizingCompiler* compiler = NULL;
     output_queue_.Dequeue(&compiler);
     Compiler::InstallOptimizedCode(compiler);
@@ -110,6 +119,17 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
 }
 
+
+Handle<SharedFunctionInfo>
+    OptimizingCompilerThread::InstallNextOptimizedFunction() {
+  ASSERT(FLAG_manual_parallel_recompilation);
+  output_queue_semaphore_->Wait();
+  OptimizingCompiler* compiler = NULL;
+  output_queue_.Dequeue(&compiler);
+  Compiler::InstallOptimizedCode(compiler);
+  return compiler->info()->shared_info();
+}
+
 
 void OptimizingCompilerThread::QueueForOptimization(
     OptimizingCompiler* optimizing_compiler) {
   input_queue_.Enqueue(optimizing_compiler);

src/optimizing-compiler-thread.h

@@ -29,8 +29,8 @@
 #define V8_OPTIMIZING_COMPILER_THREAD_H_
 
 #include "atomicops.h"
-#include "platform.h"
 #include "flags.h"
+#include "platform.h"
 #include "unbound-queue.h"
 
 namespace v8 {
@@ -38,6 +38,7 @@ namespace internal {
 class HGraphBuilder;
 class OptimizingCompiler;
+class SharedFunctionInfo;
 
 class OptimizingCompilerThread : public Thread {
  public:
@@ -46,6 +47,7 @@ class OptimizingCompilerThread : public Thread {
       isolate_(isolate),
       stop_semaphore_(OS::CreateSemaphore(0)),
       input_queue_semaphore_(OS::CreateSemaphore(0)),
+      output_queue_semaphore_(OS::CreateSemaphore(0)),
       time_spent_compiling_(0),
       time_spent_total_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
@@ -57,6 +59,9 @@ class OptimizingCompilerThread : public Thread {
   void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
   void InstallOptimizedFunctions();
 
+  // Wait for the next optimized function and install it.
+  Handle<SharedFunctionInfo> InstallNextOptimizedFunction();
+
   inline bool IsQueueAvailable() {
     // We don't need a barrier since we have a data dependency right
     // after.
@@ -77,6 +82,7 @@ class OptimizingCompilerThread : public Thread {
   ~OptimizingCompilerThread() {
     delete input_queue_semaphore_;
+    delete output_queue_semaphore_;  // Only used for manual mode.
     delete stop_semaphore_;
   }
@@ -84,6 +90,7 @@ class OptimizingCompilerThread : public Thread {
   Isolate* isolate_;
   Semaphore* stop_semaphore_;
   Semaphore* input_queue_semaphore_;
+  Semaphore* output_queue_semaphore_;
   UnboundQueue<OptimizingCompiler*> input_queue_;
   UnboundQueue<OptimizingCompiler*> output_queue_;
   volatile AtomicWord stop_thread_;

src/runtime-profiler.cc

@@ -140,6 +140,9 @@ static void GetICCounts(JSFunction* function,
 void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
   ASSERT(function->IsOptimizable());
+  // If we are in manual mode, don't auto-optimize anything.
+  if (FLAG_manual_parallel_recompilation) return;
+
   if (FLAG_trace_opt) {
     PrintF("[marking ");
     function->PrintName();

src/runtime.cc

@@ -7990,7 +7990,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
   HandleScope handle_scope(isolate);
   ASSERT(FLAG_parallel_recompilation);
   Compiler::RecompileParallel(args.at<JSFunction>(0));
-  return *isolate->factory()->undefined_value();
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_ForceParallelRecompile) {
+  HandleScope handle_scope(isolate);
+  ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
+  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+    return isolate->Throw(
+        *isolate->factory()->LookupAsciiSymbol("Recompile queue is full."));
+  }
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+  fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
+  Compiler::RecompileParallel(fun);
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
+  HandleScope handle_scope(isolate);
+  ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
+  CONVERT_ARG_HANDLE_CHECKED(HeapObject, arg, 0);
+  OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
+  if (!arg->IsJSFunction()) {
+    opt_thread->InstallOptimizedFunctions();
+  } else if (!JSFunction::cast(*arg)->IsOptimized()) {
+    Handle<SharedFunctionInfo> shared(JSFunction::cast(*arg)->shared());
+    while (*opt_thread->InstallNextOptimizedFunction() != *shared) { }
+  }
+  return isolate->heap()->undefined_value();
 }

src/runtime.h

@@ -85,7 +85,9 @@ namespace internal {
   F(NewStrictArgumentsFast, 3, 1) \
   F(LazyCompile, 1, 1) \
   F(LazyRecompile, 1, 1) \
   F(ParallelRecompile, 1, 1) \
+  F(ForceParallelRecompile, 1, 1) \
+  F(InstallRecompiledCode, 1, 1) \
   F(NotifyDeoptimized, 1, 1) \
   F(NotifyOSR, 0, 1) \
   F(DeoptimizeFunction, 1, 1) \

test/mjsunit/manual-parallel-recompile.js (new file)

@@ -0,0 +1,80 @@
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
// Flags: --parallel-recompilation --manual-parallel-recompilation
function assertOptimized(fun) {
// This assertion takes --always-opt and --nocrankshaft flags into account.
assertTrue(%GetOptimizationStatus(fun) != 2);
}
function assertUnoptimized(fun) {
assertTrue(%GetOptimizationStatus(fun) != 1);
}
function f(x) {
var xx = x * x;
var xxstr = xx.toString();
return xxstr.length;
}
function g(x) {
var xxx = Math.sqrt(x) | 0;
var xxxstr = xxx.toString();
return xxxstr.length;
}
function k(x) {
return x * x;
}
f(g(1));
f(g(2));
assertUnoptimized(f);
assertUnoptimized(g);
%ForceParallelRecompile(f);
%ForceParallelRecompile(g);
assertUnoptimized(f);
assertUnoptimized(g);
var sum = 0;
for (var i = 0; i < 10000; i++) sum += f(i) + g(i);
gc();
assertEquals(95274, sum);
assertUnoptimized(f);
assertUnoptimized(g);
%InstallRecompiledCode(f);
assertOptimized(f);
assertUnoptimized(g);
%InstallRecompiledCode("the rest");
assertOptimized(g);