Retire concurrent recompilation delay for non-stress testing.

Instead, we block concurrent recompilation until it is explicitly
unblocked. This makes the affected tests more predictable and faster to run.
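
A minimal sketch of the resulting test pattern (assuming the mjsunit harness;
f is a hypothetical function, and the real tests additionally guard on
%IsConcurrentRecompilationSupported()):

// Flags: --allow-natives-syntax
// Flags: --concurrent-recompilation --block-concurrent-recompilation

function f(x) { return x + 1; }  // hypothetical function under test
f(1);
// Mark for concurrent optimization, then kick off recompilation.
%OptimizeFunctionOnNextCall(f, "concurrent");
f(1);
// The queued job is still blocked, so no optimized code has been installed.
assertUnoptimized(f, "no sync");
// Release the blocked job and sync with the background thread.
%UnblockConcurrentRecompilation();
assertOptimized(f, "sync");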

R=jkummerow@chromium.org
BUG=

Review URL: https://codereview.chromium.org/26758003

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@17199 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: yangguo@chromium.org
Date:   2013-10-14 14:15:22 +00:00
Commit: 71ba8c5fb4
Parent: 2d0d210eca
12 changed files with 80 additions and 26 deletions

View File

@@ -328,6 +328,8 @@ DEFINE_int(concurrent_recompilation_queue_length, 8,
            "the length of the concurrent compilation queue")
 DEFINE_int(concurrent_recompilation_delay, 0,
            "artificial compilation delay in ms")
+DEFINE_bool(block_concurrent_recompilation, false,
+            "block queued jobs until released")
 DEFINE_bool(concurrent_osr, false,
             "concurrent on-stack replacement")
 DEFINE_implication(concurrent_osr, concurrent_recompilation)

View File

@@ -168,6 +168,7 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
 void OptimizingCompilerThread::Flush() {
   ASSERT(!IsOptimizerThread());
   Release_Store(&stop_thread_, static_cast<AtomicWord>(FLUSH));
+  if (FLAG_block_concurrent_recompilation) Unblock();
   input_queue_semaphore_.Signal();
   stop_semaphore_.Wait();
   FlushOutputQueue(true);
@@ -181,6 +182,7 @@ void OptimizingCompilerThread::Flush() {
 void OptimizingCompilerThread::Stop() {
   ASSERT(!IsOptimizerThread());
   Release_Store(&stop_thread_, static_cast<AtomicWord>(STOP));
+  if (FLAG_block_concurrent_recompilation) Unblock();
   input_queue_semaphore_.Signal();
   stop_semaphore_.Wait();
@@ -252,7 +254,20 @@ void OptimizingCompilerThread::QueueForOptimization(RecompileJob* job) {
     info->closure()->MarkInRecompileQueue();
   }
   input_queue_.Enqueue(job);
-  input_queue_semaphore_.Signal();
+  if (FLAG_block_concurrent_recompilation) {
+    blocked_jobs_++;
+  } else {
+    input_queue_semaphore_.Signal();
+  }
 }
+void OptimizingCompilerThread::Unblock() {
+  ASSERT(!IsOptimizerThread());
+  while (blocked_jobs_ > 0) {
+    input_queue_semaphore_.Signal();
+    blocked_jobs_--;
+  }
+}

View File

@@ -55,7 +55,8 @@ class OptimizingCompilerThread : public Thread {
       input_queue_semaphore_(0),
       osr_cursor_(0),
       osr_hits_(0),
-      osr_attempts_(0) {
+      osr_attempts_(0),
+      blocked_jobs_(0) {
     NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(CONTINUE));
     NoBarrier_Store(&queue_length_, static_cast<AtomicWord>(0));
     if (FLAG_concurrent_osr) {
@@ -73,6 +74,7 @@ class OptimizingCompilerThread : public Thread {
   void Stop();
   void Flush();
   void QueueForOptimization(RecompileJob* optimizing_compiler);
+  void Unblock();
   void InstallOptimizedFunctions();
   RecompileJob* FindReadyOSRCandidate(Handle<JSFunction> function,
                                       uint32_t osr_pc_offset);
@@ -141,6 +143,8 @@ class OptimizingCompilerThread : public Thread {
   int osr_hits_;
   int osr_attempts_;
+  int blocked_jobs_;
 };
 } } // namespace v8::internal

View File

@@ -8579,6 +8579,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationStatus) {
 }
+RUNTIME_FUNCTION(MaybeObject*, Runtime_UnblockConcurrentRecompilation) {
+  RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
+  isolate->optimizing_compiler_thread()->Unblock();
+  return isolate->heap()->undefined_value();
+}
 RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOptimizationCount) {
   HandleScope scope(isolate);
   ASSERT(args.length() == 1);

View File

@@ -99,6 +99,7 @@ namespace internal {
   F(NeverOptimizeFunction, 1, 1) \
   F(GetOptimizationStatus, -1, 1) \
   F(GetOptimizationCount, 1, 1) \
+  F(UnblockConcurrentRecompilation, 0, 1) \
   F(CompileForOnStackReplacement, 2, 1) \
   F(SetAllocationTimeout, 2, 1) \
   F(AllocateInNewSpace, 1, 1) \

View File

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Flags: --track-fields --track-double-fields --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=100
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -49,9 +49,13 @@ add_field(new_object());
 %OptimizeFunctionOnNextCall(add_field, "concurrent");
 var o = new_object();
-// Trigger optimization in the background thread.
+// Kick off recompilation.
 add_field(o);
-// Invalidate transition map while optimization is underway.
+// Invalidate transition map after compile graph has been created.
 o.c = 2.2;
+// In the mean time, concurrent recompiling is still blocked.
+assertUnoptimized(add_field, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
 // Sync with background thread to conclude optimization that bailed out.
 assertUnoptimized(add_field, "sync");

View File

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=50
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -43,12 +43,14 @@ assertEquals(1, f(o));
 // Mark for concurrent optimization.
 %OptimizeFunctionOnNextCall(f, "concurrent");
-// Trigger optimization in the background thread.
+// Kick off recompilation.
 assertEquals(1, f(o));
-// While concurrent recompilation is running, optimization not yet done.
-assertUnoptimized(f, "no sync");
-// Change the prototype chain during optimization to trigger map invalidation.
+// Change the prototype chain after compile graph has been created.
 o.__proto__.__proto__ = { bar: function() { return 2; } };
+// At this point, concurrent recompilation thread has not yet done its job.
+assertUnoptimized(f, "no sync");
+// Let the background thread proceed.
+%UnblockConcurrentRecompilation();
 // Optimization eventually bails out due to map dependency.
 assertUnoptimized(f, "sync");
 assertEquals(2, f(o));

View File

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Flags: --allow-natives-syntax --expose-gc
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=50
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -55,10 +55,13 @@ assertUnoptimized(g);
 %OptimizeFunctionOnNextCall(f, "concurrent");
 %OptimizeFunctionOnNextCall(g, "concurrent");
-f(g(2)); // Trigger optimization.
+f(g(2)); // Kick off recompilation.
-assertUnoptimized(f, "no sync"); // Not yet optimized while background thread
-assertUnoptimized(g, "no sync"); // is running.
+assertUnoptimized(f, "no sync"); // Not yet optimized since recompilation
+assertUnoptimized(g, "no sync"); // is still blocked.
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
 assertOptimized(f, "sync"); // Optimized once we sync with the
 assertOptimized(g, "sync"); // background thread.

View File

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=100
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -43,12 +43,15 @@ assertEquals(0.5, f1(arr, 0));
 // Optimized code of f1 depends on initial object and array maps.
 %OptimizeFunctionOnNextCall(f1, "concurrent");
-// Trigger optimization in the background thread
+// Kick off recompilation;
 assertEquals(0.5, f1(arr, 0));
-Object.prototype[1] = 1.5; // Invalidate current initial object map.
+// Invalidate current initial object map after compile graph has been created.
+Object.prototype[1] = 1.5;
 assertEquals(2, f1(arr, 1));
-// Not yet optimized while background thread is running.
+// Not yet optimized since concurrent recompilation is blocked.
 assertUnoptimized(f1, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
 // Sync with background thread to conclude optimization, which bails out
 // due to map dependency.
 assertUnoptimized(f1, "sync");

View File

@@ -27,7 +27,7 @@
 // Flags: --fold-constants --nodead-code-elimination
 // Flags: --expose-gc --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=600
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -39,12 +39,14 @@ function test(fun) {
   fun();
   // Mark for concurrent optimization.
   %OptimizeFunctionOnNextCall(fun, "concurrent");
-  //Trigger optimization in the background.
+  // Kick off recompilation.
   fun();
-  //Tenure cons string.
+  // Tenure cons string after compile graph has been created.
   gc();
-  // In the mean time, concurrent recompiling is not complete yet.
+  // In the mean time, concurrent recompiling is still blocked.
   assertUnoptimized(fun, "no sync");
+  // Let concurrent recompilation proceed.
+  %UnblockConcurrentRecompilation();
   // Concurrent recompilation eventually finishes, embeds tenured cons string.
   assertOptimized(fun, "sync");
   // Visit embedded cons string during mark compact.

View File

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --concurrent-recompilation --concurrent-recompilation-delay=100
+// Flags: --concurrent-recompilation --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -60,8 +60,14 @@ f();
 %OptimizeFunctionOnNextCall(f, "concurrent"); // Mark with builtin.
 f(); // Kick off concurrent recompilation.
+// After compile graph has been created...
 Debug.setListener(listener); // Activate debugger.
 Debug.setBreakPoint(f, 2, 0); // Force deopt.
+// At this point, concurrent recompilation is still being blocked.
+assertUnoptimized(f, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
 // Sync with optimization thread. But no optimized code is installed.
 assertUnoptimized(f, "sync");

View File

@@ -26,7 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 // Flags: --expose-debug-as debug --allow-natives-syntax
-// Flags: --concurrent-recompilation-delay=300
+// Flags: --block-concurrent-recompilation
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
@@ -46,17 +46,22 @@ function bar() {
 }
 foo();
-// Mark and trigger concurrent optimization.
+// Mark and kick off recompilation.
 %OptimizeFunctionOnNextCall(foo, "concurrent");
 foo();
 // Set break points on an unrelated function. This clears both optimized
 // and (shared) unoptimized code on foo, and sets both to lazy-compile builtin.
 // Clear the break point immediately after to deactivate the debugger.
+// Do all of this after compile graph has been created.
 Debug.setBreakPoint(bar, 0, 0);
 Debug.clearAllBreakPoints();
+// At this point, concurrent recompilation is still blocked.
+assertUnoptimized(foo, "no sync");
+// Let concurrent recompilation proceed.
+%UnblockConcurrentRecompilation();
 // Install optimized code when concurrent optimization finishes.
 // This needs to be able to deal with shared code being a builtin.
 assertUnoptimized(foo, "sync");