diff --git a/src/api.cc b/src/api.cc
index c93b23c471..5978ed270e 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -770,7 +770,6 @@ void Context::Exit() {
   i::Context* last_context =
       isolate->handle_scope_implementer()->RestoreContext();
   isolate->set_context(last_context);
-  isolate->set_context_exit_happened(true);
 }
 
 
diff --git a/src/factory.cc b/src/factory.cc
index b135a9c670..ab17eb5b4f 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -673,7 +673,11 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
       function_info->allows_lazy_compilation() &&
       !function_info->optimization_disabled() &&
       !isolate()->DebuggerHasBreakPoints()) {
-    result->MarkForLazyRecompilation();
+    if (FLAG_parallel_recompilation) {
+      result->MarkForParallelRecompilation();
+    } else {
+      result->MarkForLazyRecompilation();
+    }
   }
   return result;
 }
diff --git a/src/heap.cc b/src/heap.cc
index fd0dc41380..b2c12183cb 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -703,6 +703,16 @@ bool Heap::CollectGarbage(AllocationSpace space,
 }
 
 
+int Heap::NotifyContextDisposed() {
+  if (FLAG_parallel_recompilation) {
+    // Flush the queued recompilation tasks.
+    isolate()->optimizing_compiler_thread()->Flush();
+  }
+  flush_monomorphic_ics_ = true;
+  return ++contexts_disposed_;
+}
+
+
 void Heap::PerformScavenge() {
   GCTracer tracer(this, NULL, NULL);
   if (incremental_marking()->IsStopped()) {
diff --git a/src/heap.h b/src/heap.h
index 78ed21abef..f96d3b404e 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1252,10 +1252,7 @@ class Heap {
   void EnsureHeapIsIterable();
 
   // Notify the heap that a context has been disposed.
-  int NotifyContextDisposed() {
-    flush_monomorphic_ics_ = true;
-    return ++contexts_disposed_;
-  }
+  int NotifyContextDisposed();
 
   // Utility to invoke the scavenger. This is needed in test code to
   // ensure correct callback for weak global handles.
diff --git a/src/isolate.cc b/src/isolate.cc
index 4cf0252d93..ccd6f280dc 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1777,7 +1777,6 @@ Isolate::Isolate()
       regexp_stack_(NULL),
       date_cache_(NULL),
       code_stub_interface_descriptors_(NULL),
-      context_exit_happened_(false),
       initialized_from_snapshot_(false),
       cpu_profiler_(NULL),
       heap_profiler_(NULL),
diff --git a/src/isolate.h b/src/isolate.h
index 2612242464..065277093e 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1059,13 +1059,6 @@ class Isolate {
     thread_local_top_.top_lookup_result_ = top;
   }
 
-  bool context_exit_happened() {
-    return context_exit_happened_;
-  }
-  void set_context_exit_happened(bool context_exit_happened) {
-    context_exit_happened_ = context_exit_happened;
-  }
-
   bool initialized_from_snapshot() { return initialized_from_snapshot_; }
 
   double time_millis_since_init() {
@@ -1313,10 +1306,6 @@
   unibrow::Mapping<unibrow::Ecma262Canonicalize>
       interp_canonicalize_mapping_;
   CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
 
-  // The garbage collector should be a little more aggressive when it knows
-  // that a context was recently exited.
-  bool context_exit_happened_;
-
   // True if this isolate was initialized from a snapshot.
   bool initialized_from_snapshot_;
diff --git a/src/liveedit.cc b/src/liveedit.cc
index bab2e101bc..b998a26dd7 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -1290,6 +1290,7 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
     if (code_scope_info->IsFixedArray()) {
       shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
     }
+    shared_info->DisableOptimization("LiveEdit");
   }
 
   if (shared_info->debug_info()->IsDebugInfo()) {
diff --git a/src/objects.cc b/src/objects.cc
index 2ecc57b5de..e904612566 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -9234,10 +9234,7 @@ void JSFunction::MarkForParallelRecompilation() {
   ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
   ASSERT(!IsOptimized());
   ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
-  if (!FLAG_parallel_recompilation) {
-    JSFunction::MarkForLazyRecompilation();
-    return;
-  }
+  ASSERT(FLAG_parallel_recompilation);
   if (FLAG_trace_parallel_recompilation) {
     PrintF("  ** Marking ");
     PrintName();
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 21ef237107..11d60c33d2 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -60,12 +60,23 @@ void OptimizingCompilerThread::Run() {
       OS::Sleep(FLAG_parallel_recompilation_delay);
     }
 
-    if (Acquire_Load(&stop_thread_)) {
-      stop_semaphore_->Signal();
-      if (FLAG_trace_parallel_recompilation) {
-        time_spent_total_ = OS::Ticks() - epoch;
-      }
-      return;
+    switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) {
+      case CONTINUE:
+        break;
+      case STOP:
+        if (FLAG_trace_parallel_recompilation) {
+          time_spent_total_ = OS::Ticks() - epoch;
+        }
+        stop_semaphore_->Signal();
+        return;
+      case FLUSH:
+        // Reset input queue semaphore.
+        delete input_queue_semaphore_;
+        input_queue_semaphore_ = OS::CreateSemaphore(0);
+        // Signal for main thread to start flushing.
+        stop_semaphore_->Signal();
+        // Return to start of consumer loop.
+        continue;
     }
 
     int64_t compiling_start = 0;
@@ -102,9 +113,41 @@ void OptimizingCompilerThread::CompileNext() {
 }
 
 
+void OptimizingCompilerThread::FlushQueue(
+    UnboundQueue<OptimizingCompiler*>* queue,
+    bool restore_function_code) {
+  ASSERT(!IsOptimizerThread());
+  OptimizingCompiler* optimizing_compiler;
+  // The optimizing compiler is allocated in the CompilationInfo's zone.
+  while (queue->Dequeue(&optimizing_compiler)) {
+    CompilationInfo* info = optimizing_compiler->info();
+    if (restore_function_code) {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+    delete info;
+  }
+}
+
+
+void OptimizingCompilerThread::Flush() {
+  ASSERT(!IsOptimizerThread());
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+  input_queue_semaphore_->Signal();
+
+  FlushQueue(&input_queue_, true);
+  NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
+
+  stop_semaphore_->Wait();
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
+
+  FlushQueue(&output_queue_, true);
+}
+
+
 void OptimizingCompilerThread::Stop() {
   ASSERT(!IsOptimizerThread());
-  Release_Store(&stop_thread_, static_cast<AtomicWord>(true));
+  Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
   input_queue_semaphore_->Signal();
   stop_semaphore_->Wait();
 
@@ -114,14 +157,8 @@ void OptimizingCompilerThread::Stop() {
     while (NoBarrier_Load(&queue_length_) > 0) CompileNext();
     InstallOptimizedFunctions();
   } else {
-    OptimizingCompiler* optimizing_compiler;
-    // The optimizing compiler is allocated in the CompilationInfo's zone.
-    while (input_queue_.Dequeue(&optimizing_compiler)) {
-      delete optimizing_compiler->info();
-    }
-    while (output_queue_.Dequeue(&optimizing_compiler)) {
-      delete optimizing_compiler->info();
-    }
+    FlushQueue(&input_queue_, false);
+    FlushQueue(&output_queue_, false);
   }
 
   if (FLAG_trace_parallel_recompilation) {
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index 275ceb40b7..5a87a975e5 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -54,13 +54,13 @@ class OptimizingCompilerThread : public Thread {
       install_mutex_(OS::CreateMutex()),
       time_spent_compiling_(0),
       time_spent_total_(0) {
-    NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
+    NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
     NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
   }
 
   void Run();
   void Stop();
-  void CompileNext();
+  void Flush();
   void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
   void InstallOptimizedFunctions();
 
@@ -92,6 +92,12 @@ class OptimizingCompilerThread : public Thread {
   }
 
  private:
+  enum StopFlag { CONTINUE, STOP, FLUSH };
+
+  void FlushQueue(UnboundQueue<OptimizingCompiler*>* queue,
+                  bool restore_function_code);
+  void CompileNext();
+
 #ifdef DEBUG
   int thread_id_;
   Mutex* thread_id_mutex_;
diff --git a/src/runtime.cc b/src/runtime.cc
index 0c88cae9c3..3c3665ceb7 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -8460,8 +8460,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
   }
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (FLAG_parallel_recompilation && sync_with_compiler_thread) {
-    while (function->IsMarkedForParallelRecompilation() ||
-           function->IsInRecompileQueue() ||
+    while (function->IsInRecompileQueue() ||
            function->IsMarkedForInstallingRecompiledCode()) {
       isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
       OS::Sleep(50);
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
index dfc27548b5..c164193ee5 100644
--- a/test/cctest/test-deoptimization.cc
+++ b/test/cctest/test-deoptimization.cc
@@ -77,23 +77,27 @@ class AlwaysOptimizeAllowNativesSyntaxNoInlining {
 
 
 // Utility class to set --allow-natives-syntax and --nouse-inlining when
 // constructed and return to their default state when destroyed.
-class AllowNativesSyntaxNoInlining {
+class AllowNativesSyntaxNoInliningNoParallel {
  public:
-  AllowNativesSyntaxNoInlining()
+  AllowNativesSyntaxNoInliningNoParallel()
       : allow_natives_syntax_(i::FLAG_allow_natives_syntax),
-        use_inlining_(i::FLAG_use_inlining) {
+        use_inlining_(i::FLAG_use_inlining),
+        parallel_recompilation_(i::FLAG_parallel_recompilation) {
     i::FLAG_allow_natives_syntax = true;
     i::FLAG_use_inlining = false;
+    i::FLAG_parallel_recompilation = false;
   }
 
-  ~AllowNativesSyntaxNoInlining() {
+  ~AllowNativesSyntaxNoInliningNoParallel() {
     i::FLAG_allow_natives_syntax = allow_natives_syntax_;
     i::FLAG_use_inlining = use_inlining_;
+    i::FLAG_parallel_recompilation = parallel_recompilation_;
   }
 
  private:
   bool allow_natives_syntax_;
   bool use_inlining_;
+  bool parallel_recompilation_;
 };
 
 
@@ -343,7 +347,7 @@ TEST(DeoptimizeBinaryOperationADDString) {
   const char* f_source = "function f(x, y) { return x + y; };";
 
   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile function f and collect to type feedback to insert binary op stub
     // call in the optimized code.
     i::FLAG_prepare_always_opt = true;
@@ -401,7 +405,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
                binary_op);
   char* f_source = f_source_buffer.start();
 
-  AllowNativesSyntaxNoInlining options;
+  AllowNativesSyntaxNoInliningNoParallel options;
   // Compile function f and collect to type feedback to insert binary op stub
   // call in the optimized code.
   i::FLAG_prepare_always_opt = true;
@@ -493,7 +497,7 @@ TEST(DeoptimizeCompare) {
   const char* f_source = "function f(x, y) { return x < y; };";
 
   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile function f and collect to type feedback to insert compare ic
     // call in the optimized code.
     i::FLAG_prepare_always_opt = true;
@@ -540,7 +544,7 @@ TEST(DeoptimizeLoadICStoreIC) {
   const char* g2_source = "function g2(x, y) { x[y] = 1; };";
 
   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile functions and collect to type feedback to insert ic
     // calls in the optimized code.
     i::FLAG_prepare_always_opt = true;
@@ -620,7 +624,7 @@ TEST(DeoptimizeLoadICStoreICNested) {
   const char* g2_source = "function g2(x, y) { x[y] = 1; };";
 
   {
-    AllowNativesSyntaxNoInlining options;
+    AllowNativesSyntaxNoInliningNoParallel options;
     // Compile functions and collect to type feedback to insert ic
     // calls in the optimized code.
     i::FLAG_prepare_always_opt = true;