Do not eagerly update allow_osr_at_loop_nesting_level.

Having debug break points prevents OSR, so an OSR attempt can bail out
after callers have already bumped the nesting level. That lets
allow_osr_at_loop_nesting_level and the actually patched state go out
of sync.

R=jkummerow@chromium.org
BUG=387599
LOG=Y

Review URL: https://codereview.chromium.org/346223007

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@21958 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
Author: yangguo@chromium.org
Date:   2014-06-24 09:31:30 +0000
Parent: 00617033dd
Commit: 438f49a322

9 changed files with 62 additions and 67 deletions
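To make the fix concrete, here is a minimal standalone sketch (plain C++, not V8 code; the names and the nesting cap are illustrative) of the invariant the diffs below establish: Patch() derives the target nesting level itself and records it only after the back edges were actually patched, so an attempt that bails out, for instance because debug break points are set, leaves the recorded level untouched.

#include <cassert>

const int kMaxLoopNestingMarker = 6;  // illustrative cap

struct Code {
  int allow_osr_at_loop_nesting_level = 0;  // recorded nesting level
  int patched_level = 0;                    // what is actually patched
};

// Stands in for BackEdgeTable::Patch after this commit: derive the level,
// patch, and only then record it.
void Patch(Code* code, bool debug_break_points_set) {
  if (debug_break_points_set) return;  // OSR prevented; record nothing
  int level = code->allow_osr_at_loop_nesting_level + 1;
  if (level > kMaxLoopNestingMarker) return;
  code->patched_level = level;                    // patch the back edges...
  code->allow_osr_at_loop_nesting_level = level;  // ...then record the level
}

int main() {
  Code code;
  Patch(&code, /*debug_break_points_set=*/true);  // bails out; still in sync
  assert(code.allow_osr_at_loop_nesting_level == code.patched_level);
  Patch(&code, /*debug_break_points_set=*/false);
  assert(code.allow_osr_at_loop_nesting_level == 1);
  assert(code.patched_level == 1);
  return 0;
}

Under the old scheme the caller bumped the level first and then attempted the patch, so the bail-out path left the field one step ahead of reality.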


@@ -328,7 +328,6 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_profiler_ticks(0);
   code->set_back_edge_table_offset(table_offset);
-  code->set_back_edges_patched_for_osr(false);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);
 #ifdef ENABLE_GDB_JIT_INTERFACE
@@ -348,7 +347,7 @@ unsigned FullCodeGenerator::EmitBackEdgeTable() {
   // The back edge table consists of a length (in number of entries)
   // field, and then a sequence of entries. Each entry is a pair of AST id
   // and code-relative pc offset.
-  masm()->Align(kIntSize);
+  masm()->Align(kPointerSize);
   unsigned offset = masm()->pc_offset();
   unsigned length = back_edges_.length();
   __ dd(length);
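The table format in the comment above can be made concrete with a standalone sketch (plain C++, not V8's BackEdgeTable; the entry fields and widths are illustrative assumptions): a length word followed by fixed-size entries. The pointer alignment introduced in this hunk is what later lets the table offset be stored in pointer-size units.

#include <cassert>
#include <cstdint>
#include <cstring>

struct Entry {
  uint32_t ast_id;     // id of the loop's AST node
  uint32_t pc_offset;  // code-relative offset of the back edge
};

// Reads entry i from a raw table that starts with a uint32_t length field.
Entry ReadEntry(const uint8_t* table, uint32_t i) {
  uint32_t length;
  std::memcpy(&length, table, sizeof(length));
  assert(i < length);
  Entry entry;
  std::memcpy(&entry, table + sizeof(length) + i * sizeof(Entry),
              sizeof(entry));
  return entry;
}

int main() {
  // Build a one-entry table in memory: the length word, then the entry.
  uint8_t buffer[sizeof(uint32_t) + sizeof(Entry)];
  uint32_t length = 1;
  Entry entry = {42, 128};
  std::memcpy(buffer, &length, sizeof(length));
  std::memcpy(buffer + sizeof(length), &entry, sizeof(entry));
  assert(ReadEntry(buffer, 0).ast_id == 42);
  assert(ReadEntry(buffer, 0).pc_offset == 128);
  return 0;
}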
@@ -1617,9 +1616,11 @@ void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
   Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
-  // Iterate over the back edge table and patch every interrupt
+  // Increment loop nesting level by one and iterate over the back edge table
+  // to find the matching loops to patch the interrupt
   // call to an unconditional call to the replacement code.
-  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1;
+  if (loop_nesting_level > Code::kMaxLoopNestingMarker) return;
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
@@ -1631,8 +1632,8 @@ void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
     }
   }
-  unoptimized->set_back_edges_patched_for_osr(true);
-  ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+  unoptimized->set_allow_osr_at_loop_nesting_level(loop_nesting_level);
+  ASSERT(Verify(isolate, unoptimized));
 }
@@ -1641,7 +1642,6 @@ void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
   Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
 
   // Iterate over the back edge table and revert the patched interrupt calls.
-  ASSERT(unoptimized->back_edges_patched_for_osr());
   int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
 
   BackEdgeTable back_edges(unoptimized, &no_gc);
@@ -1654,10 +1654,9 @@ void BackEdgeTable::Revert(Isolate* isolate, Code* unoptimized) {
     }
   }
-  unoptimized->set_back_edges_patched_for_osr(false);
   unoptimized->set_allow_osr_at_loop_nesting_level(0);
   // Assert that none of the back edges are patched anymore.
-  ASSERT(Verify(isolate, unoptimized, -1));
+  ASSERT(Verify(isolate, unoptimized));
 }
@@ -1683,10 +1682,9 @@ void BackEdgeTable::RemoveStackCheck(Handle<Code> code, uint32_t pc_offset) {
 
 #ifdef DEBUG
-bool BackEdgeTable::Verify(Isolate* isolate,
-                           Code* unoptimized,
-                           int loop_nesting_level) {
+bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     uint32_t loop_depth = back_edges.loop_depth(i);


@@ -890,10 +890,8 @@ class BackEdgeTable {
     OSR_AFTER_STACK_CHECK
   };
 
-  // Patch all interrupts with allowed loop depth in the unoptimized code to
-  // unconditionally call replacement_code.
-  static void Patch(Isolate* isolate,
-                    Code* unoptimized_code);
+  // Increase allowed loop nesting level by one and patch those matching loops.
+  static void Patch(Isolate* isolate, Code* unoptimized_code);
 
   // Patch the back edge to the target state, provided the correct callee.
   static void PatchAt(Code* unoptimized_code,
@@ -919,9 +917,7 @@ class BackEdgeTable {
 
 #ifdef DEBUG
   // Verify that all back edges of a certain loop depth are patched.
-  static bool Verify(Isolate* isolate,
-                     Code* unoptimized_code,
-                     int loop_nesting_level);
+  static bool Verify(Isolate* isolate, Code* unoptimized_code);
 #endif  // DEBUG
 
  private:


@@ -168,6 +168,8 @@ const size_t kMaximalCodeRangeSize = 0 * MB;
 #endif
 #endif
 
+STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
+
 const int kBitsPerByte = 8;
 const int kBitsPerByteLog2 = 3;
 const int kBitsPerPointer = kPointerSize * kBitsPerByte;


@@ -4651,14 +4651,17 @@ void Code::set_compiled_optimizable(bool value) {
 int Code::allow_osr_at_loop_nesting_level() {
   ASSERT_EQ(FUNCTION, kind());
-  return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+  int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  return AllowOSRAtLoopNestingLevelField::decode(fields);
 }
 
 
 void Code::set_allow_osr_at_loop_nesting_level(int level) {
   ASSERT_EQ(FUNCTION, kind());
   ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
-  WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
 }
@@ -4711,13 +4714,14 @@ void Code::set_safepoint_table_offset(unsigned offset) {
 unsigned Code::back_edge_table_offset() {
   ASSERT_EQ(FUNCTION, kind());
   return BackEdgeTableOffsetField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
 }
 
 
 void Code::set_back_edge_table_offset(unsigned offset) {
   ASSERT_EQ(FUNCTION, kind());
-  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  ASSERT(IsAligned(offset, static_cast<unsigned>(kPointerSize)));
+  offset = offset >> kPointerSizeLog2;
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = BackEdgeTableOffsetField::update(previous, offset);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
@@ -4726,20 +4730,10 @@ void Code::set_back_edge_table_offset(unsigned offset) {
 bool Code::back_edges_patched_for_osr() {
   ASSERT_EQ(FUNCTION, kind());
-  return BackEdgesPatchedForOSRField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+  return allow_osr_at_loop_nesting_level() > 0;
 }
 
 
-void Code::set_back_edges_patched_for_osr(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
-  int updated = BackEdgesPatchedForOSRField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
 byte Code::to_boolean_state() {
   return extra_ic_state();
 }


@@ -5569,7 +5569,6 @@ class Code: public HeapObject {
   inline void set_back_edge_table_offset(unsigned offset);
 
   inline bool back_edges_patched_for_osr();
-  inline void set_back_edges_patched_for_osr(bool value);
 
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
@@ -5814,8 +5813,7 @@ class Code: public HeapObject {
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
   class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
 
-  static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
-  static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
+  static const int kProfilerTicksOffset = kFullCodeFlags + 1;
 
   // Flags layout. BitField<type, shift, size>.
   class ICStateField: public BitField<InlineCacheState, 0, 3> {};
@@ -5886,9 +5884,10 @@ class Code: public HeapObject {
   // KindSpecificFlags2 layout (FUNCTION)
   class BackEdgeTableOffsetField: public BitField<int,
-      kIsCrankshaftedBit + 1, 29> {};  // NOLINT
-  class BackEdgesPatchedForOSRField: public BitField<bool,
-      kIsCrankshaftedBit + 1 + 29, 1> {};  // NOLINT
+      kIsCrankshaftedBit + 1, 27> {};  // NOLINT
+  class AllowOSRAtLoopNestingLevelField: public BitField<int,
+      kIsCrankshaftedBit + 1 + 27, 4> {};  // NOLINT
+  STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker);
 
   static const int kArgumentsBits = 16;
   static const int kMaxArguments = (1 << kArgumentsBits) - 1;


@@ -110,7 +110,9 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
 }
 
 
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
+void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
+                                                int loop_nesting_levels) {
+  SharedFunctionInfo* shared = function->shared();
   // See AlwaysFullCompiler (in compiler.cc) comment on why we need
   // Debug::has_break_points().
   if (!FLAG_use_osr ||
@@ -119,7 +121,6 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     return;
   }
 
-  SharedFunctionInfo* shared = function->shared();
   // If the code is not optimizable, don't try OSR.
   if (!shared->code()->optimizable()) return;
@@ -137,7 +138,9 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     PrintF("]\n");
   }
 
-  BackEdgeTable::Patch(isolate_, shared->code());
+  for (int i = 0; i < loop_nesting_levels; i++) {
+    BackEdgeTable::Patch(isolate_, shared->code());
+  }
 }
@@ -175,14 +178,8 @@ void RuntimeProfiler::OptimizeNow() {
     if (shared_code->kind() != Code::FUNCTION) continue;
     if (function->IsInOptimizationQueue()) continue;
 
-    if (FLAG_always_osr &&
-        shared_code->allow_osr_at_loop_nesting_level() == 0) {
-      // Testing mode: always try an OSR compile for every function.
-      for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
-        // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
-        shared_code->set_allow_osr_at_loop_nesting_level(i);
-        AttemptOnStackReplacement(function);
-      }
+    if (FLAG_always_osr) {
+      AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
       // Fall through and do a normal optimized compile as well.
     } else if (!frame->is_optimized() &&
         (function->IsMarkedForOptimization() ||
@@ -196,12 +193,7 @@ void RuntimeProfiler::OptimizeNow() {
       if (shared_code->CodeSize() > allowance) {
         if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
       } else {
-        int nesting = shared_code->allow_osr_at_loop_nesting_level();
-        if (nesting < Code::kMaxLoopNestingMarker) {
-          int new_nesting = nesting + 1;
-          shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
-          AttemptOnStackReplacement(function);
-        }
+        AttemptOnStackReplacement(function);
       }
       continue;
     }
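The shape of the new calling convention is easiest to see in isolation; one more standalone sketch (again plain C++ with illustrative names and cap): raising the OSR level by N is simply N calls to Patch(), one level per call, and over-asking saturates harmlessly at the marker cap.

#include <cassert>

const int kMaxLoopNestingMarker = 6;  // illustrative cap

struct Code { int allow_osr_at_loop_nesting_level = 0; };

// Stands in for BackEdgeTable::Patch: raise the level by one per call and
// do nothing once the cap is reached.
void Patch(Code* code) {
  int level = code->allow_osr_at_loop_nesting_level + 1;
  if (level > kMaxLoopNestingMarker) return;
  code->allow_osr_at_loop_nesting_level = level;
}

void AttemptOnStackReplacement(Code* code, int loop_nesting_levels = 1) {
  for (int i = 0; i < loop_nesting_levels; i++) Patch(code);
}

int main() {
  Code code;
  AttemptOnStackReplacement(&code);       // one profiler tick: one level
  assert(code.allow_osr_at_loop_nesting_level == 1);
  AttemptOnStackReplacement(&code, 100);  // over-asking saturates at the cap
  assert(code.allow_osr_at_loop_nesting_level == kMaxLoopNestingMarker);
  return 0;
}

This is why the caller-side bump-and-loop code in OptimizeNow and in Runtime_OptimizeFunctionOnNextCall below could be deleted.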


@@ -23,7 +23,7 @@ class RuntimeProfiler {
   void NotifyICChanged() { any_ic_changed_ = true; }
 
-  void AttemptOnStackReplacement(JSFunction* function);
+  void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
 
  private:
   void Optimize(JSFunction* function, const char* reason);


@@ -8530,16 +8530,11 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
   if (args.length() == 2 &&
       unoptimized->kind() == Code::FUNCTION) {
     CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
-    if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
+    if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr")) && FLAG_use_osr) {
       // Start patching from the currently patched loop nesting level.
-      int current_level = unoptimized->allow_osr_at_loop_nesting_level();
-      ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
-      if (FLAG_use_osr) {
-        for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
-          unoptimized->set_allow_osr_at_loop_nesting_level(i);
-          isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
-        }
-      }
+      ASSERT(BackEdgeTable::Verify(isolate, unoptimized));
+      isolate->runtime_profiler()->AttemptOnStackReplacement(
+          *function, Code::kMaxLoopNestingMarker);
     } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
                isolate->concurrent_recompilation_enabled()) {
       function->MarkForConcurrentOptimization();


@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+Debug.setListener(function() {});
+
+function f() {
+  for (var i = 0; i < 100; i++) {
+    %OptimizeFunctionOnNextCall(f, "osr");
+  }
+}
+
+Debug.setBreakPoint(f, 0, 0);
+f();
+f();
+Debug.setListener(null);