diff --git a/src/api.cc b/src/api.cc index a6f7d9d511..d587b81fd3 100644 --- a/src/api.cc +++ b/src/api.cc @@ -770,6 +770,7 @@ void Context::Exit() { i::Context* last_context = isolate->handle_scope_implementer()->RestoreContext(); isolate->set_context(last_context); + isolate->set_context_exit_happened(true); } diff --git a/src/compiler.cc b/src/compiler.cc index f4112d7e96..4cac73f7b6 100644 --- a/src/compiler.cc +++ b/src/compiler.cc @@ -967,9 +967,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) { if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) { if (FLAG_trace_parallel_recompilation) { - PrintF(" ** Compilation queue full, will retry optimizing "); - closure->PrintName(); - PrintF(" on next run.\n"); + PrintF(" ** Compilation queue, will retry opting on next run.\n"); } return; } diff --git a/src/debug.cc b/src/debug.cc index 8454438815..04f8a7a027 100644 --- a/src/debug.cc +++ b/src/debug.cc @@ -2044,10 +2044,6 @@ void Debug::PrepareForBreakPoints() { // If preparing for the first break point make sure to deoptimize all // functions as debugging does not work with optimized code. if (!has_break_points_) { - if (FLAG_parallel_recompilation) { - isolate_->optimizing_compiler_thread()->Flush(); - } - Deoptimizer::DeoptimizeAll(isolate_); Handle<Code> lazy_compile = diff --git a/src/factory.cc b/src/factory.cc index e2a38c8209..c5a1fddb88 100644 --- a/src/factory.cc +++ b/src/factory.cc @@ -1219,7 +1219,6 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo( shared->set_num_literals(literals_array_size); if (is_generator) { shared->set_instance_class_name(isolate()->heap()->Generator_string()); - shared->DisableOptimization("generator"); } return shared; } diff --git a/src/heap.cc b/src/heap.cc index 5cd85445b4..692ec21820 100644 --- a/src/heap.cc +++ b/src/heap.cc @@ -703,16 +703,6 @@ bool Heap::CollectGarbage(AllocationSpace space, } -int Heap::NotifyContextDisposed() { - if (FLAG_parallel_recompilation) { - // Flush the queued recompilation tasks. 
- isolate()->optimizing_compiler_thread()->Flush(); - } - flush_monomorphic_ics_ = true; - return ++contexts_disposed_; -} - - void Heap::PerformScavenge() { GCTracer tracer(this, NULL, NULL); if (incremental_marking()->IsStopped()) { diff --git a/src/heap.h b/src/heap.h index 5e8a2e516d..fbe0531014 100644 --- a/src/heap.h +++ b/src/heap.h @@ -1254,7 +1254,10 @@ class Heap { void EnsureHeapIsIterable(); // Notify the heap that a context has been disposed. - int NotifyContextDisposed(); + int NotifyContextDisposed() { + flush_monomorphic_ics_ = true; + return ++contexts_disposed_; + } // Utility to invoke the scavenger. This is needed in test code to // ensure correct callback for weak global handles. diff --git a/src/isolate.cc b/src/isolate.cc index 346ece86fe..61f1e2dcfa 100644 --- a/src/isolate.cc +++ b/src/isolate.cc @@ -1782,6 +1782,7 @@ Isolate::Isolate() regexp_stack_(NULL), date_cache_(NULL), code_stub_interface_descriptors_(NULL), + context_exit_happened_(false), initialized_from_snapshot_(false), cpu_profiler_(NULL), heap_profiler_(NULL), diff --git a/src/isolate.h b/src/isolate.h index 894d25d6f0..c008317737 100644 --- a/src/isolate.h +++ b/src/isolate.h @@ -1060,6 +1060,13 @@ class Isolate { thread_local_top_.top_lookup_result_ = top; } + bool context_exit_happened() { + return context_exit_happened_; + } + void set_context_exit_happened(bool context_exit_happened) { + context_exit_happened_ = context_exit_happened; + } + bool initialized_from_snapshot() { return initialized_from_snapshot_; } double time_millis_since_init() { @@ -1307,6 +1314,10 @@ class Isolate { unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_; CodeStubInterfaceDescriptor* code_stub_interface_descriptors_; + // The garbage collector should be a little more aggressive when it knows + // that a context was recently exited. + bool context_exit_happened_; + // True if this isolate was initialized from a snapshot. 
bool initialized_from_snapshot_; diff --git a/src/liveedit.cc b/src/liveedit.cc index 33586de4b9..859cf2b94f 100644 --- a/src/liveedit.cc +++ b/src/liveedit.cc @@ -1290,7 +1290,6 @@ MaybeObject* LiveEdit::ReplaceFunctionCode( if (code_scope_info->IsFixedArray()) { shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info)); } - shared_info->DisableOptimization("LiveEdit"); } if (shared_info->debug_info()->IsDebugInfo()) { diff --git a/src/objects.cc b/src/objects.cc index d2ffb9c3c8..7839faaddf 100644 --- a/src/objects.cc +++ b/src/objects.cc @@ -9234,7 +9234,6 @@ void JSFunction::MarkForLazyRecompilation() { ASSERT(!IsOptimized()); ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); - ASSERT(!shared()->is_generator()); set_code_no_write_barrier( GetIsolate()->builtins()->builtin(Builtins::kLazyRecompile)); // No write barrier required, since the builtin is part of the root set. @@ -9245,8 +9244,10 @@ void JSFunction::MarkForParallelRecompilation() { ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints()); ASSERT(!IsOptimized()); ASSERT(shared()->allows_lazy_compilation() || code()->optimizable()); - ASSERT(!shared()->is_generator()); - ASSERT(FLAG_parallel_recompilation); + if (!FLAG_parallel_recompilation) { + JSFunction::MarkForLazyRecompilation(); + return; + } if (FLAG_trace_parallel_recompilation) { PrintF(" ** Marking "); PrintName(); diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc index 11d60c33d2..21ef237107 100644 --- a/src/optimizing-compiler-thread.cc +++ b/src/optimizing-compiler-thread.cc @@ -60,23 +60,12 @@ void OptimizingCompilerThread::Run() { OS::Sleep(FLAG_parallel_recompilation_delay); } - switch (static_cast<StopFlag>(Acquire_Load(&stop_thread_))) { - case CONTINUE: - break; - case STOP: - if (FLAG_trace_parallel_recompilation) { - time_spent_total_ = OS::Ticks() - epoch; - } - stop_semaphore_->Signal(); - return; - case FLUSH: - // Reset input queue semaphore. 
- delete input_queue_semaphore_; - input_queue_semaphore_ = OS::CreateSemaphore(0); - // Signal for main thread to start flushing. - stop_semaphore_->Signal(); - // Return to start of consumer loop. - continue; + if (Acquire_Load(&stop_thread_)) { + stop_semaphore_->Signal(); + if (FLAG_trace_parallel_recompilation) { + time_spent_total_ = OS::Ticks() - epoch; + } + return; } int64_t compiling_start = 0; @@ -113,41 +102,9 @@ void OptimizingCompilerThread::CompileNext() { } -void OptimizingCompilerThread::FlushQueue( - UnboundQueue<OptimizingCompiler*>* queue, - bool restore_function_code) { - ASSERT(!IsOptimizerThread()); - OptimizingCompiler* optimizing_compiler; - // The optimizing compiler is allocated in the CompilationInfo's zone. - while (queue->Dequeue(&optimizing_compiler)) { - CompilationInfo* info = optimizing_compiler->info(); - if (restore_function_code) { - Handle<JSFunction> function = info->closure(); - function->ReplaceCode(function->shared()->code()); - } - delete info; - } -} - - -void OptimizingCompilerThread::Flush() { - ASSERT(!IsOptimizerThread()); - Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH)); - input_queue_semaphore_->Signal(); - - FlushQueue(&input_queue_, true); - NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); - - stop_semaphore_->Wait(); - Release_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); - - FlushQueue(&output_queue_, true); -} - - void OptimizingCompilerThread::Stop() { ASSERT(!IsOptimizerThread()); - Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP)); + Release_Store(&stop_thread_, static_cast<AtomicWord>(true)); input_queue_semaphore_->Signal(); stop_semaphore_->Wait(); @@ -157,8 +114,14 @@ void OptimizingCompilerThread::Stop() { while (NoBarrier_Load(&queue_length_) > 0) CompileNext(); InstallOptimizedFunctions(); } else { - FlushQueue(&input_queue_, false); - FlushQueue(&output_queue_, false); + OptimizingCompiler* optimizing_compiler; + // The optimizing compiler is allocated in the CompilationInfo's zone. 
+ while (input_queue_.Dequeue(&optimizing_compiler)) { + delete optimizing_compiler->info(); + } + while (output_queue_.Dequeue(&optimizing_compiler)) { + delete optimizing_compiler->info(); + } } if (FLAG_trace_parallel_recompilation) { diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h index 5a87a975e5..275ceb40b7 100644 --- a/src/optimizing-compiler-thread.h +++ b/src/optimizing-compiler-thread.h @@ -54,13 +54,13 @@ class OptimizingCompilerThread : public Thread { install_mutex_(OS::CreateMutex()), time_spent_compiling_(0), time_spent_total_(0) { - NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE)); + NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false)); NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0)); } void Run(); void Stop(); - void Flush(); + void CompileNext(); void QueueForOptimization(OptimizingCompiler* optimizing_compiler); void InstallOptimizedFunctions(); @@ -92,12 +92,6 @@ class OptimizingCompilerThread : public Thread { } private: - enum StopFlag { CONTINUE, STOP, FLUSH }; - - void FlushQueue(UnboundQueue<OptimizingCompiler*>* queue, - bool restore_function_code); - void CompileNext(); - #ifdef DEBUG int thread_id_; Mutex* thread_id_mutex_; diff --git a/src/runtime.cc b/src/runtime.cc index afa728ded1..ed3527fa92 100644 --- a/src/runtime.cc +++ b/src/runtime.cc @@ -2948,7 +2948,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ResumeJSGeneratorObject) { JavaScriptFrame* frame = stack_iterator.frame(); ASSERT_EQ(frame->function(), generator_object->function()); - ASSERT(frame->function()->is_compiled()); STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting <= 0); STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed <= 0); @@ -8432,7 +8431,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) { } CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0); if (FLAG_parallel_recompilation && sync_with_compiler_thread) { - while (function->IsInRecompileQueue() || + while (function->IsMarkedForParallelRecompilation() || + 
function->IsInRecompileQueue() || function->IsMarkedForInstallingRecompiledCode()) { isolate->optimizing_compiler_thread()->InstallOptimizedFunctions(); OS::Sleep(50); diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc index c164193ee5..dfc27548b5 100644 --- a/test/cctest/test-deoptimization.cc +++ b/test/cctest/test-deoptimization.cc @@ -77,27 +77,23 @@ class AlwaysOptimizeAllowNativesSyntaxNoInlining { // Utility class to set --allow-natives-syntax and --nouse-inlining when // constructed and return to their default state when destroyed. -class AllowNativesSyntaxNoInliningNoParallel { +class AllowNativesSyntaxNoInlining { public: - AllowNativesSyntaxNoInliningNoParallel() + AllowNativesSyntaxNoInlining() : allow_natives_syntax_(i::FLAG_allow_natives_syntax), - use_inlining_(i::FLAG_use_inlining), - parallel_recompilation_(i::FLAG_parallel_recompilation) { + use_inlining_(i::FLAG_use_inlining) { i::FLAG_allow_natives_syntax = true; i::FLAG_use_inlining = false; - i::FLAG_parallel_recompilation = false; } - ~AllowNativesSyntaxNoInliningNoParallel() { + ~AllowNativesSyntaxNoInlining() { i::FLAG_allow_natives_syntax = allow_natives_syntax_; i::FLAG_use_inlining = use_inlining_; - i::FLAG_parallel_recompilation = parallel_recompilation_; } private: bool allow_natives_syntax_; bool use_inlining_; - bool parallel_recompilation_; }; @@ -347,7 +343,7 @@ TEST(DeoptimizeBinaryOperationADDString) { const char* f_source = "function f(x, y) { return x + y; };"; { - AllowNativesSyntaxNoInliningNoParallel options; + AllowNativesSyntaxNoInlining options; // Compile function f and collect to type feedback to insert binary op stub // call in the optimized code. 
i::FLAG_prepare_always_opt = true; @@ -405,7 +401,7 @@ static void TestDeoptimizeBinaryOpHelper(LocalContext* env, binary_op); char* f_source = f_source_buffer.start(); - AllowNativesSyntaxNoInliningNoParallel options; + AllowNativesSyntaxNoInlining options; // Compile function f and collect to type feedback to insert binary op stub // call in the optimized code. i::FLAG_prepare_always_opt = true; @@ -497,7 +493,7 @@ TEST(DeoptimizeCompare) { const char* f_source = "function f(x, y) { return x < y; };"; { - AllowNativesSyntaxNoInliningNoParallel options; + AllowNativesSyntaxNoInlining options; // Compile function f and collect to type feedback to insert compare ic // call in the optimized code. i::FLAG_prepare_always_opt = true; @@ -544,7 +540,7 @@ TEST(DeoptimizeLoadICStoreIC) { const char* g2_source = "function g2(x, y) { x[y] = 1; };"; { - AllowNativesSyntaxNoInliningNoParallel options; + AllowNativesSyntaxNoInlining options; // Compile functions and collect to type feedback to insert ic // calls in the optimized code. i::FLAG_prepare_always_opt = true; @@ -624,7 +620,7 @@ TEST(DeoptimizeLoadICStoreICNested) { const char* g2_source = "function g2(x, y) { x[y] = 1; };"; { - AllowNativesSyntaxNoInliningNoParallel options; + AllowNativesSyntaxNoInlining options; // Compile functions and collect to type feedback to insert ic // calls in the optimized code. i::FLAG_prepare_always_opt = true; diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc index 4d312f6665..6af9962bd1 100644 --- a/test/cctest/test-heap.cc +++ b/test/cctest/test-heap.cc @@ -2826,7 +2826,6 @@ void ReleaseStackTraceDataTest(const char* source, const char* accessor) { // to check whether the data is being released since the external string // resource's callback is fired when the external string is GC'ed. FLAG_use_ic = false; // ICs retain objects. 
- FLAG_parallel_recompilation = false; CcTest::InitializeVM(); v8::HandleScope scope(CcTest::isolate()); SourceResource* resource = new SourceResource(i::StrDup(source)); diff --git a/test/mjsunit/regress/regress-prepare-break-while-recompile.js b/test/mjsunit/regress/regress-prepare-break-while-recompile.js deleted file mode 100644 index e494173856..0000000000 --- a/test/mjsunit/regress/regress-prepare-break-while-recompile.js +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2013 the V8 project authors. All rights reserved. -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Flags: --expose-debug-as debug --allow-natives-syntax -// Flags: --parallel-recompilation-delay=300 - -if (!%IsParallelRecompilationSupported()) { - print("Parallel recompilation is disabled. Skipping this test."); - quit(); -} - -Debug = debug.Debug - -function foo() { - var x = 1; - return x; -} - -function bar() { - var x = 2; - return x; -} - -foo(); -// Mark and trigger parallel optimization. -%OptimizeFunctionOnNextCall(foo, "parallel"); -foo(); - -// Set break points on an unrelated function. This clears both optimized -// and (shared) unoptimized code on foo, and sets both to lazy-compile builtin. -// Clear the break point immediately after to deactivate the debugger. -Debug.setBreakPoint(bar, 0, 0); -Debug.clearAllBreakPoints(); - -// Install optimized code when parallel optimization finishes. -// This needs to be able to deal with shared code being a builtin. -assertUnoptimized(foo, "sync"); -