Refactor back edge table related code into a new class.

This is mostly moving and renaming, except for the BackEdgeTableIterator.
The motivation is that the back edges in unoptimized code have nothing to do with the deoptimizer.

R=titzer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/23526069

git-svn-id: http://v8.googlecode.com/svn/branches/bleeding_edge@16815 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
yangguo@chromium.org 2013-09-19 09:08:08 +00:00
parent 8aafabc24c
commit 32ceb91735
15 changed files with 513 additions and 529 deletions
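
For orientation, the shape of the change at a typical call site, sketched with only names that appear in the diff below (code stands for any unoptimized Code* here):

// Before: a cursor-style iterator nested inside FullCodeGenerator.
DisallowHeapAllocation no_gc;
for (FullCodeGenerator::BackEdgeTableIterator it(code, &no_gc);
     !it.Done();
     it.Next()) {
  // it.ast_id(), it.pc_offset(), it.loop_depth() and it.pc() read the
  // entry under the cursor.
}

// After: a standalone BackEdgeTable with random access by index.
BackEdgeTable back_edges(code, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
  // The same accessors now take the entry index, e.g. back_edges.pc(i).
}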

src/arm/deoptimizer-arm.cc

@@ -81,100 +81,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
// e1 2f ff 3c blx ip
// ok-label
//
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// ok-label
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
// Replace the call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
// Restore the original call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
}
#ifdef DEBUG
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
uint32_t interrupt_address_offset =
Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return PATCHED_FOR_OSR;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return NOT_PATCHED;
}
}
#endif // DEBUG
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are

src/arm/full-codegen-arm.cc

@@ -4892,6 +4892,101 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
// e1 2f ff 3c blx ip
// ok-label
//
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// ok-label
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
// Replace the call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, replacement_code);
}
void BackEdgeTable::RevertAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
// Restore the original call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
}
#ifdef DEBUG
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
uint32_t interrupt_address_offset =
Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(osr_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return ON_STACK_REPLACEMENT;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
ASSERT(reinterpret_cast<uint32_t>(interrupt_builtin->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return INTERRUPT;
}
}
#endif // DEBUG
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

src/deoptimizer.cc

@@ -2338,85 +2338,6 @@ void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
}
void Deoptimizer::PatchInterruptCode(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* replacement_code =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) == loop_nesting_level) {
ASSERT_EQ(NOT_PATCHED, GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()));
PatchInterruptCodeAt(unoptimized,
back_edges.pc(),
replacement_code);
}
}
unoptimized->set_back_edges_patched_for_osr(true);
ASSERT(Deoptimizer::VerifyInterruptCode(
isolate, unoptimized, loop_nesting_level));
}
void Deoptimizer::RevertInterruptCode(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* interrupt_code =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
if (static_cast<int>(back_edges.loop_depth()) <= loop_nesting_level) {
ASSERT_EQ(PATCHED_FOR_OSR, GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()));
RevertInterruptCodeAt(unoptimized, back_edges.pc(), interrupt_code);
}
}
unoptimized->set_back_edges_patched_for_osr(false);
unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
ASSERT(Deoptimizer::VerifyInterruptCode(isolate, unoptimized, -1));
}
#ifdef DEBUG
bool Deoptimizer::VerifyInterruptCode(Isolate* isolate,
Code* unoptimized,
int loop_nesting_level) {
DisallowHeapAllocation no_gc;
for (FullCodeGenerator::BackEdgeTableIterator back_edges(unoptimized, &no_gc);
!back_edges.Done();
back_edges.Next()) {
uint32_t loop_depth = back_edges.loop_depth();
CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
GetInterruptPatchState(isolate,
unoptimized,
back_edges.pc()) != NOT_PATCHED);
}
return true;
}
#endif // DEBUG
unsigned Deoptimizer::ComputeInputFrameSize() const {
unsigned fixed_size = ComputeFixedSize(function_);
// The fp-to-sp delta already takes the context and the function

src/deoptimizer.h

@@ -131,11 +131,6 @@ class Deoptimizer : public Malloced {
DEBUGGER
};
enum InterruptPatchState {
NOT_PATCHED,
PATCHED_FOR_OSR
};
static const int kBailoutTypesWithCodeEntry = SOFT + 1;
struct JumpTableEntry {
@@ -213,39 +208,6 @@ class Deoptimizer : public Malloced {
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
static void PatchInterruptCode(Isolate* isolate,
Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code);
// Change all patched interrupt calls in the unoptimized code
// back to normal interrupts.
static void RevertInterruptCode(Isolate* isolate,
Code* unoptimized_code);
// Change patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code);
#ifdef DEBUG
static InterruptPatchState GetInterruptPatchState(Isolate* isolate,
Code* unoptimized_code,
Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
static bool VerifyInterruptCode(Isolate* isolate,
Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);

src/full-codegen.cc

@@ -1615,6 +1615,79 @@ bool FullCodeGenerator::TryLiteralCompare(CompareOperation* expr) {
}
void BackEdgeTable::Patch(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* replacement_code =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
unoptimized,
back_edges.pc(i)));
PatchAt(unoptimized, back_edges.pc(i), replacement_code);
}
}
unoptimized->set_back_edges_patched_for_osr(true);
ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
}
void BackEdgeTable::Revert(Isolate* isolate,
Code* unoptimized) {
DisallowHeapAllocation no_gc;
Code* interrupt_code =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
ASSERT_EQ(ON_STACK_REPLACEMENT, GetBackEdgeState(isolate,
unoptimized,
back_edges.pc(i)));
RevertAt(unoptimized, back_edges.pc(i), interrupt_code);
}
}
unoptimized->set_back_edges_patched_for_osr(false);
unoptimized->set_allow_osr_at_loop_nesting_level(0);
// Assert that none of the back edges are patched anymore.
ASSERT(Verify(isolate, unoptimized, -1));
}
#ifdef DEBUG
bool BackEdgeTable::Verify(Isolate* isolate,
Code* unoptimized,
int loop_nesting_level) {
DisallowHeapAllocation no_gc;
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
uint32_t loop_depth = back_edges.loop_depth(i);
CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
GetBackEdgeState(isolate,
unoptimized,
back_edges.pc(i)) != INTERRUPT);
}
return true;
}
#endif // DEBUG
#undef __

src/full-codegen.h

@@ -139,65 +139,6 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
class BackEdgeTableIterator {
public:
explicit BackEdgeTableIterator(Code* unoptimized,
DisallowHeapAllocation* required) {
ASSERT(unoptimized->kind() == Code::FUNCTION);
instruction_start_ = unoptimized->instruction_start();
cursor_ = instruction_start_ + unoptimized->back_edge_table_offset();
ASSERT(cursor_ < instruction_start_ + unoptimized->instruction_size());
table_length_ = Memory::uint32_at(cursor_);
cursor_ += kTableLengthSize;
end_ = cursor_ + table_length_ * kEntrySize;
}
bool Done() { return cursor_ >= end_; }
void Next() {
ASSERT(!Done());
cursor_ += kEntrySize;
}
BailoutId ast_id() {
ASSERT(!Done());
return BailoutId(static_cast<int>(
Memory::uint32_at(cursor_ + kAstIdOffset)));
}
uint32_t loop_depth() {
ASSERT(!Done());
return Memory::uint32_at(cursor_ + kLoopDepthOffset);
}
uint32_t pc_offset() {
ASSERT(!Done());
return Memory::uint32_at(cursor_ + kPcOffsetOffset);
}
Address pc() {
ASSERT(!Done());
return instruction_start_ + pc_offset();
}
uint32_t table_length() { return table_length_; }
private:
static const int kTableLengthSize = kIntSize;
static const int kAstIdOffset = 0 * kIntSize;
static const int kPcOffsetOffset = 1 * kIntSize;
static const int kLoopDepthOffset = 2 * kIntSize;
static const int kEntrySize = 3 * kIntSize;
Address cursor_;
Address end_;
Address instruction_start_;
uint32_t table_length_;
DISALLOW_COPY_AND_ASSIGN(BackEdgeTableIterator);
};
private:
class Breakable;
class Iteration;
@@ -940,6 +881,91 @@ class AccessorTable: public TemplateHashMap<Literal,
};
class BackEdgeTable {
public:
BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
ASSERT(code->kind() == Code::FUNCTION);
instruction_start_ = code->instruction_start();
Address table_address = instruction_start_ + code->back_edge_table_offset();
length_ = Memory::uint32_at(table_address);
start_ = table_address + kTableLengthSize;
}
uint32_t length() { return length_; }
BailoutId ast_id(uint32_t index) {
return BailoutId(static_cast<int>(
Memory::uint32_at(entry_at(index) + kAstIdOffset)));
}
uint32_t loop_depth(uint32_t index) {
return Memory::uint32_at(entry_at(index) + kLoopDepthOffset);
}
uint32_t pc_offset(uint32_t index) {
return Memory::uint32_at(entry_at(index) + kPcOffsetOffset);
}
Address pc(uint32_t index) {
return instruction_start_ + pc_offset(index);
}
enum BackEdgeState {
INTERRUPT,
ON_STACK_REPLACEMENT
};
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
static void Patch(Isolate* isolate,
Code* unoptimized_code);
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code);
// Change all patched interrupt calls in the unoptimized code
// back to normal interrupts.
static void Revert(Isolate* isolate,
Code* unoptimized_code);
// Change patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code);
#ifdef DEBUG
static BackEdgeState GetBackEdgeState(Isolate* isolate,
Code* unoptimized_code,
Address pc_after);
// Verify that all back edges of a certain loop depth are patched.
static bool Verify(Isolate* isolate,
Code* unoptimized_code,
int loop_nesting_level);
#endif // DEBUG
private:
Address entry_at(uint32_t index) {
ASSERT(index < length_);
return start_ + index * kEntrySize;
}
static const int kTableLengthSize = kIntSize;
static const int kAstIdOffset = 0 * kIntSize;
static const int kPcOffsetOffset = 1 * kIntSize;
static const int kLoopDepthOffset = 2 * kIntSize;
static const int kEntrySize = 3 * kIntSize;
Address start_;
Address instruction_start_;
uint32_t length_;
};
} } // namespace v8::internal
#endif // V8_FULL_CODEGEN_H_
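
For reference, the table layout that the offset constants above imply, sketched as a comment (each field is kIntSize = 4 bytes; the table is embedded in the instruction stream at back_edge_table_offset()):

// instruction_start_ + back_edge_table_offset():
//   [ length     ]                      read into length_
//   [ ast_id     ]  <-- start_          kAstIdOffset     = 0 * kIntSize
//   [ pc_offset  ]      (entry 0)       kPcOffsetOffset  = 1 * kIntSize
//   [ loop_depth ]                      kLoopDepthOffset = 2 * kIntSize
//   [ ast_id     ]  <-- entry 1 at start_ + kEntrySize (3 * kIntSize)
//   ...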

src/ia32/deoptimizer-ia32.cc

@@ -177,87 +177,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
// The back edge bookkeeping code matches the pattern:
//
// sub <profiling_counter>, <delta>
// jns ok
// call <interrupt stub>
// ok:
//
// The patched back edge looks like this:
//
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// ok:
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
// Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
}
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
// Restore the original call address.
Assembler::set_target_address_at(call_target_address,
interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, interrupt_code);
}
#ifdef DEBUG
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT_EQ(osr_builtin->entry(),
Assembler::target_address_at(call_target_address));
return PATCHED_FOR_OSR;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
return NOT_PATCHED;
}
}
#endif // DEBUG
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are

src/ia32/full-codegen-ia32.cc

@@ -4891,6 +4891,88 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x11;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
// The back edge bookkeeping code matches the pattern:
//
// sub <profiling_counter>, <delta>
// jns ok
// call <interrupt stub>
// ok:
//
// The patched back edge looks like this:
//
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// ok:
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc,
Code* replacement_code) {
// Turn the jump into nops.
Address call_target_address = pc - kIntSize;
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
// Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
}
void BackEdgeTable::RevertAt(Code* unoptimized_code,
Address pc,
Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
// Restore the original call address.
Assembler::set_target_address_at(call_target_address,
interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, interrupt_code);
}
#ifdef DEBUG
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate,
Code* unoptimized_code,
Address pc) {
Address call_target_address = pc - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT_EQ(osr_builtin->entry(),
Assembler::target_address_at(call_target_address));
return ON_STACK_REPLACEMENT;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
return INTERRUPT;
}
}
#endif // DEBUG
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32

src/mips/deoptimizer-mips.cc

@@ -78,88 +78,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
// The back edge bookkeeping code matches the pattern:
//
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
// lui t9, <interrupt stub address> upper
// ori t9, <interrupt stub address> lower
// jalr t9
// nop
// ok-label ----- pc_after points here
//
// We patch the code to the following form:
//
// addiu at, zero_reg, 1
// beq at, zero_reg, ok ;; Not changed
// lui t9, <on-stack replacement address> upper
// ori t9, <on-stack replacement address> lower
// jalr t9 ;; Not changed
// nop ;; Not changed
// ok-label ----- pc_after points here
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->addiu(at, zero_reg, 1);
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->slt(at, a3, zero_reg);
// Restore the original call address.
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
}
#ifdef DEBUG
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
if (Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize))) {
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(osr_builtin->entry()));
return PATCHED_FOR_OSR;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
return NOT_PATCHED;
}
}
#endif // DEBUG
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are

src/mips/full-codegen-mips.cc

@@ -4924,6 +4924,89 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
// The back edge bookkeeping code matches the pattern:
//
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
// lui t9, <interrupt stub address> upper
// ori t9, <interrupt stub address> lower
// jalr t9
// nop
// ok-label ----- pc_after points here
//
// We patch the code to the following form:
//
// addiu at, zero_reg, 1
// beq at, zero_reg, ok ;; Not changed
// lui t9, <on-stack replacement address> upper
// ori t9, <on-stack replacement address> lower
// jalr t9 ;; Not changed
// nop ;; Not changed
// ok-label ----- pc_after points here
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->addiu(at, zero_reg, 1);
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
void BackEdgeTable::RevertAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->slt(at, a3, zero_reg);
// Restore the original call address.
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
}
#ifdef DEBUG
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
if (Assembler::IsAddImmediate(
Assembler::instr_at(pc_after - 6 * kInstrSize))) {
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(osr_builtin->entry()));
return ON_STACK_REPLACEMENT;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT(reinterpret_cast<uint32_t>(
Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
reinterpret_cast<uint32_t>(interrupt_builtin->entry()));
return INTERRUPT;
}
}
#endif // DEBUG
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_MIPS

src/objects.cc

@@ -10367,10 +10367,9 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
 BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
   DisallowHeapAllocation no_gc;
   ASSERT(kind() == FUNCTION);
-  for (FullCodeGenerator::BackEdgeTableIterator it(this, &no_gc);
-       !it.Done();
-       it.Next()) {
-    if (it.pc_offset() == pc_offset) return it.ast_id();
+  BackEdgeTable back_edges(this, &no_gc);
+  for (uint32_t i = 0; i < back_edges.length(); i++) {
+    if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
   }
   return BailoutId::None();
 }
@@ -10838,15 +10837,15 @@ void Code::Disassemble(const char* name, FILE* out) {
     // (due to alignment) the end of the instruction stream.
     if (static_cast<int>(offset) < instruction_size()) {
       DisallowHeapAllocation no_gc;
-      FullCodeGenerator::BackEdgeTableIterator back_edges(this, &no_gc);
-      PrintF(out, "Back edges (size = %u)\n", back_edges.table_length());
+      BackEdgeTable back_edges(this, &no_gc);
+      PrintF(out, "Back edges (size = %u)\n", back_edges.length());
       PrintF(out, "ast_id  pc_offset  loop_depth\n");
-      for ( ; !back_edges.Done(); back_edges.Next()) {
-        PrintF(out, "%6d  %9u  %10u\n", back_edges.ast_id().ToInt(),
-               back_edges.pc_offset(),
-               back_edges.loop_depth());
+      for (uint32_t i = 0; i < back_edges.length(); i++) {
+        PrintF(out, "%6d  %9u  %10u\n", back_edges.ast_id(i).ToInt(),
+               back_edges.pc_offset(i),
+               back_edges.loop_depth(i));
       }
       PrintF(out, "\n");
src/runtime-profiler.cc

@@ -185,7 +185,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
     PrintF("]\n");
   }
-  Deoptimizer::PatchInterruptCode(isolate_, shared->code());
+  BackEdgeTable::Patch(isolate_, shared->code());
 }

src/runtime.cc

@@ -8497,8 +8497,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
   if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
     // Start patching from the currently patched loop nesting level.
    int current_level = unoptimized->allow_osr_at_loop_nesting_level();
-    ASSERT(Deoptimizer::VerifyInterruptCode(
-        isolate, unoptimized, current_level));
+    ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
    for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
      unoptimized->set_allow_osr_at_loop_nesting_level(i);
      isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
@@ -8651,8 +8650,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
     result = JSFunction::CompileOsr(function, ast_id, CLEAR_EXCEPTION);
   }
-  // Revert the patched interrupt now, regardless of whether OSR succeeds.
-  Deoptimizer::RevertInterruptCode(isolate, *unoptimized);
+  // Revert the patched back edge table, regardless of whether OSR succeeds.
+  BackEdgeTable::Revert(isolate, *unoptimized);
   // Check whether we ended up with usable optimized code.
   if (!result.is_null() && result->kind() == Code::OPTIMIZED_FUNCTION) {
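
Taken together, the two runtime call sites above give each unoptimized Code object a small OSR lifecycle; a sketch assembled from the calls in this commit (isolate and unoptimized stand in for the values in scope at each site):

// 1. The runtime profiler raises allow_osr_at_loop_nesting_level and
//    patches the matching back edges: INTERRUPT -> ON_STACK_REPLACEMENT.
BackEdgeTable::Patch(isolate, unoptimized);
// 2. When a patched back edge fires, Runtime_CompileForOnStackReplacement
//    tries to build optimized code, then always restores the interrupt
//    calls: ON_STACK_REPLACEMENT -> INTERRUPT.
BackEdgeTable::Revert(isolate, unoptimized);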

src/x64/deoptimizer-x64.cc

@@ -82,87 +82,6 @@ void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
}
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1d;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
// The back edge bookkeeping code matches the pattern:
//
// add <profiling_counter>, <-delta>
// jns ok
// call <stack guard>
// ok:
//
// We will patch away the branch so the code is:
//
// add <profiling_counter>, <-delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// ok:
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
// Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
}
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
// Restore the original call address.
Assembler::set_target_address_at(call_target_address,
interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, interrupt_code);
}
#ifdef DEBUG
Deoptimizer::InterruptPatchState Deoptimizer::GetInterruptPatchState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT_EQ(osr_builtin->entry(),
Assembler::target_address_at(call_target_address));
return PATCHED_FOR_OSR;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
return NOT_PATCHED;
}
}
#endif // DEBUG
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
// Set the register values. The values are not important as there are no
// callee saved registers in JavaScript frames, so all registers are

src/x64/full-codegen-x64.cc

@@ -4877,6 +4877,88 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
#undef __
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1d;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
// The back edge bookkeeping code matches the pattern:
//
// add <profiling_counter>, <-delta>
// jns ok
// call <stack guard>
// ok:
//
// We will patch away the branch so the code is:
//
// add <profiling_counter>, <-delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// ok:
void BackEdgeTable::PatchAt(Code* unoptimized_code,
Address pc_after,
Code* replacement_code) {
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
// Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, replacement_code);
}
void BackEdgeTable::RevertAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code) {
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
// Restore the original call address.
Assembler::set_target_address_at(call_target_address,
interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, interrupt_code);
}
#ifdef DEBUG
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate,
Code* unoptimized_code,
Address pc_after) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
Code* osr_builtin =
isolate->builtins()->builtin(Builtins::kOnStackReplacement);
ASSERT_EQ(osr_builtin->entry(),
Assembler::target_address_at(call_target_address));
return ON_STACK_REPLACEMENT;
} else {
// Get the interrupt stub code object to match against from cache.
Code* interrupt_builtin =
isolate->builtins()->builtin(Builtins::kInterruptCheck);
ASSERT_EQ(interrupt_builtin->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
return INTERRUPT;
}
}
#endif // DEBUG
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64