[sparkplug] OSR Ignition -> Sparkplug
Add support for OSR to baseline code. We compile baseline and perform OSR
immediately when the bytecode budget interrupt hits.

Drive-by: Clean up the deoptimizer's special handling of JumpLoop by using
the newly introduced GetBaselinePCForNextExecutedBytecode instead of
GetBaselineEndPCForBytecodeOffset.

Bug: v8:11420
Change-Id: Ifbea264d4a83a127dd2a11e28626bf2a5e8aca59
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2784687
Commit-Queue: Patrick Thier <pthier@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73677}
parent 904489691a
commit 52393b900b
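For orientation before the per-architecture hunks below: the new flow compiles Sparkplug (baseline) code when the bytecode budget interrupt fires and then enters it immediately via OSR, using the new GetBaselinePCForNextExecutedBytecode to pick the entry PC (the back edge's jump target when the current bytecode is JumpLoop). The following standalone C++ sketch models only that decision logic; every name in it is hypothetical shorthand, not the V8 API.

    // Toy model of the Ignition -> Sparkplug OSR decision (hypothetical names,
    // not V8 API).
    #include <cstdio>

    enum class Bytecode { kAdd, kJumpLoop, kReturn };

    struct Frame {
      int bytecode_offset;     // offset the interpreter is currently at
      Bytecode current;        // bytecode at that offset
      int jump_target_offset;  // loop header; valid when current == kJumpLoop
    };

    // Models Code::GetBaselinePCForNextExecutedBytecode: for JumpLoop, enter
    // the baseline code at the loop header (the back edge's target); otherwise
    // continue at the bytecode after the current one.
    int BaselinePCForNextExecutedBytecode(const Frame& f) {
      return f.current == Bytecode::kJumpLoop ? f.jump_target_offset
                                              : f.bytecode_offset + 1;
    }

    // Models Runtime_BytecodeBudgetInterruptFromBytecode with --sparkplug:
    // compile baseline code, then OSR into it right away instead of waiting
    // for TurboFan.
    void OnBudgetInterrupt(const Frame& f, bool sparkplug, bool use_osr) {
      if (!sparkplug) return;
      const bool compiled = true;  // stands in for Compiler::CompileBaseline()
      if (compiled && use_osr) {
        std::printf(
            "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
            f.bytecode_offset);
        std::printf("resume in baseline code at pc %d\n",
                    BaselinePCForNextExecutedBytecode(f));
      }
    }

    int main() {
      const Frame f{7, Bytecode::kJumpLoop, 2};  // interrupted on a back edge
      OnBudgetInterrupt(f, true, true);
    }

The actual change implements this in per-architecture assembly (a new Generate_OSREntry helper and an is_osr mode for Generate_BaselineEntry) and in CSA (InterpreterAssembler::OnStackReplacement), as the hunks below show.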
@@ -1784,6 +1784,20 @@ void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
 }
 
 namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+                       Operand offset = Operand::Zero()) {
+  // Compute the target address = entry_address + offset
+  if (offset.IsImmediate() && offset.immediate() == 0) {
+    __ mov(lr, entry_address);
+  } else {
+    __ add(lr, entry_address, offset);
+  }
+
+  // "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -1817,11 +1831,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   __ ldr(r1, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(
                  DeoptimizationData::kOsrPcOffsetIndex)));
 
-  // Compute the target address = code start + osr_offset
-  __ add(lr, r0, Operand::SmiUntag(r1));
-
-  // And "return" to the OSR entry point of the function.
-  __ Ret();
+  Generate_OSREntry(masm, r0, Operand::SmiUntag(r1));
 }
 }
 }  // namespace
@@ -3466,8 +3476,9 @@ namespace {
 
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
   // Get bytecode array and bytecode offset from the stack frame.
   __ ldr(kInterpreterBytecodeArrayRegister,
          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -3506,10 +3517,14 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
 
   // Compute baseline pc for bytecode offset.
   __ Push(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = r3;
   __ Move(get_baseline_pc, get_baseline_pc_extref);
 
@@ -3519,30 +3534,17 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                 kFunctionEntryBytecodeOffset));
-  __ b(eq, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = r1;
-    __ ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
-                                 kInterpreterBytecodeOffsetRegister));
-    __ cmp(bytecode,
-           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ b(ne, &not_jump_loop);
-    __ Move(get_baseline_pc,
-            ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
+  if (!is_osr) {
+    __ cmp(kInterpreterBytecodeOffsetRegister,
+           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                   kFunctionEntryBytecodeOffset));
+    __ b(eq, &function_entry_bytecode);
   }
 
   __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
 
+  __ bind(&valid_bytecode_offset);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ PrepareCallCFunction(3, 0, r0);
@@ -3554,21 +3556,37 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
     __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
-  __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ add(code_obj, code_obj, kReturnRegister0);
   __ Pop(kInterpreterAccumulatorRegister);
 
-  __ Jump(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    UseScratchRegisterScope temps(masm);
+    Register scratch = temps.Acquire();
+    __ mov(scratch, Operand(0));
+    __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                     BytecodeArray::kOsrNestingLevelOffset));
+    Generate_OSREntry(masm, code_obj,
+                      Operand(Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Jump(code_obj);
+  }
   __ Trap();  // Unreachable.
 
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ Move(get_baseline_pc,
-          ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ b(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+    if (next_bytecode) {
+      __ Move(get_baseline_pc,
+              ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ b(&valid_bytecode_offset);
+  }
 }
 
 }  // namespace
@@ -3581,6 +3599,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
 
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
@@ -2023,6 +2023,27 @@ void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
 }
 
 namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+                       Operand offset = Operand(0)) {
+  // Pop the return address to this function's caller from the return stack
+  // buffer, since we'll never return to it.
+  Label jump;
+  __ Adr(lr, &jump);
+  __ Ret();
+
+  __ Bind(&jump);
+
+  UseScratchRegisterScope temps(masm);
+  temps.Exclude(x17);
+  if (offset.IsZero()) {
+    __ Mov(x17, entry_address);
+  } else {
+    __ Add(x17, entry_address, offset);
+  }
+  __ Br(x17);
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2053,22 +2074,12 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
       x1, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(
              DeoptimizationData::kOsrPcOffsetIndex)));
 
-  // Pop the return address to this function's caller from the return stack
-  // buffer, since we'll never return to it.
-  Label jump;
-  __ Adr(lr, &jump);
-  __ Ret();
-
-  __ Bind(&jump);
-
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
   __ Add(x0, x0, x1);
-  UseScratchRegisterScope temps(masm);
-  temps.Exclude(x17);
-  __ Add(x17, x0, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(x17);
+  Generate_OSREntry(masm, x0, Code::kHeaderSize - kHeapObjectTag);
 }
 
 }  // namespace
 
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -3973,8 +3984,9 @@ namespace {
 
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
   // Get bytecode array and bytecode offset from the stack frame.
   __ Ldr(kInterpreterBytecodeArrayRegister,
          MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -4016,10 +4028,14 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
 
   // Compute baseline pc for bytecode offset.
   __ Push(padreg, kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = x3;
   __ Mov(get_baseline_pc, get_baseline_pc_extref);
 
@@ -4029,30 +4045,17 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                 kFunctionEntryBytecodeOffset));
-  __ B(eq, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = x1;
-    __ Ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
-                                 kInterpreterBytecodeOffsetRegister));
-    __ Cmp(bytecode,
-           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ B(ne, &not_jump_loop);
-    __ Mov(get_baseline_pc,
-           ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
+  if (!is_osr) {
+    __ cmp(kInterpreterBytecodeOffsetRegister,
+           Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                   kFunctionEntryBytecodeOffset));
+    __ B(eq, &function_entry_bytecode);
   }
 
   __ Sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
         (BytecodeArray::kHeaderSize - kHeapObjectTag));
 
+  __ bind(&valid_bytecode_offset);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     Register arg_reg_1 = x0;
@@ -4063,21 +4066,33 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
     __ Mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
-  __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
   __ Add(code_obj, code_obj, kReturnRegister0);
   __ Pop(kInterpreterAccumulatorRegister, padreg);
 
-  __ Jump(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                 BytecodeArray::kOsrNestingLevelOffset));
+    Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
+  } else {
+    __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    __ Jump(code_obj);
+  }
   __ Trap();  // Unreachable.
 
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ Mov(kInterpreterBytecodeOffsetRegister,
-         BytecodeArray::kHeaderSize - kHeapObjectTag);
-  __ Mov(get_baseline_pc,
-         ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ B(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ Mov(kInterpreterBytecodeOffsetRegister, Operand(0));
+    if (next_bytecode) {
+      __ Mov(get_baseline_pc,
+             ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ B(&valid_bytecode_offset);
+  }
 }
 
 }  // namespace
@@ -4090,6 +4105,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
 
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
@@ -143,6 +143,7 @@ namespace internal {
   ASM(BaselineLeaveFrame, BaselineLeaveFrame)                                 \
   ASM(BaselineEnterAtBytecode, Void)                                          \
   ASM(BaselineEnterAtNextBytecode, Void)                                      \
+  ASM(InterpreterOnStackReplacement_ToBaseline, Void)                         \
                                                                               \
   /* Code life-cycle */                                                       \
   TFC(CompileLazy, JSTrampoline)                                              \
@@ -2735,6 +2735,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }
 
 namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
+  // Overwrite the return address on the stack.
+  __ mov(Operand(esp, 0), entry_address);
+
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2767,12 +2776,9 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   // Compute the target address = code_obj + header_size + osr_offset
   __ lea(eax, Operand(eax, ecx, times_1, Code::kHeaderSize - kHeapObjectTag));
 
-  // Overwrite the return address on the stack.
-  __ mov(Operand(esp, 0), eax);
-
-  // And "return" to the OSR entry point of the function.
-  __ ret(0);
+  Generate_OSREntry(masm, eax);
 }
 
 }  // namespace
 
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -4077,8 +4083,9 @@ namespace {
 
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
   // Get bytecode array and bytecode offset from the stack frame.
   __ mov(kInterpreterBytecodeArrayRegister,
          MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -4118,10 +4125,14 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
 
   // Compute baseline pc for bytecode offset.
   __ push(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = ecx;
   __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
 
@@ -4131,33 +4142,17 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmp(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                   kFunctionEntryBytecodeOffset));
-  __ j(equal, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    __ push(kInterpreterBytecodeOffsetRegister);
-    Register bytecode = kInterpreterBytecodeOffsetRegister;
-    __ movzx_b(bytecode,
-               Operand(kInterpreterBytecodeArrayRegister,
-                       kInterpreterBytecodeOffsetRegister, times_1, 0));
-    __ cmpb(bytecode,
-            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ j(not_equal, &not_jump_loop, Label::kNear);
-    __ LoadAddress(get_baseline_pc,
-                   ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
-    __ pop(kInterpreterBytecodeOffsetRegister);
+  if (!is_osr) {
+    __ cmp(kInterpreterBytecodeOffsetRegister,
+           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                     kFunctionEntryBytecodeOffset));
+    __ j(equal, &function_entry_bytecode);
   }
 
   __ sub(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
 
+  __ bind(&valid_bytecode_offset);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ PrepareCallCFunction(3, eax);
@@ -4172,17 +4167,30 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
       FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
   __ pop(kInterpreterAccumulatorRegister);
 
-  __ jmp(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
+                          BytecodeArray::kOsrNestingLevelOffset),
+             Immediate(0));
+    Generate_OSREntry(masm, code_obj);
+  } else {
+    __ jmp(code_obj);
+  }
   __ Trap();  // Unreachable.
 
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ mov(kInterpreterBytecodeOffsetRegister,
-         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadAddress(get_baseline_pc,
-                 ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ jmp(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ mov(kInterpreterBytecodeOffsetRegister, Immediate(0));
+    if (next_bytecode) {
+      __ LoadAddress(get_baseline_pc,
+                     ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ jmp(&valid_bytecode_offset);
+  }
 }
 
 }  // namespace
@@ -4195,6 +4203,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
 
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
@@ -2631,6 +2631,15 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
 }
 
 namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
+  // Overwrite the return address on the stack.
+  __ movq(StackOperandForReturnAddress(0), entry_address);
+
+  // And "return" to the OSR entry point of the function.
+  __ ret(0);
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2663,12 +2672,9 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   // Compute the target address = code_obj + header_size + osr_offset
   __ leaq(rax, FieldOperand(rax, rbx, times_1, Code::kHeaderSize));
 
-  // Overwrite the return address on the stack.
-  __ movq(StackOperandForReturnAddress(0), rax);
-
-  // And "return" to the OSR entry point of the function.
-  __ ret(0);
+  Generate_OSREntry(masm, rax);
 }
 
 }  // namespace
 
 void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
@@ -4347,8 +4353,9 @@ namespace {
 
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
-// either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
+// either at the current or next (in execution order) bytecode.
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
   // Get bytecode array and bytecode offset from the stack frame.
   __ movq(kInterpreterBytecodeArrayRegister,
           MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
@@ -4387,10 +4394,14 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
 
   // Compute baseline pc for bytecode offset.
   __ pushq(kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = rax;
   __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);
 
@@ -4400,31 +4411,17 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ cmpq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                    kFunctionEntryBytecodeOffset));
-  __ j(equal, &function_entry_bytecode);
-  __ bind(&valid_bytecode_offset);
-
-  // In the case we advance the BC, check if the current bytecode is JumpLoop.
-  // If it is, re-execute it instead of continuing at the next bytecode.
-  if (next_bytecode) {
-    Label not_jump_loop;
-    Register bytecode = rdi;
-    __ movzxbq(bytecode,
-               Operand(kInterpreterBytecodeArrayRegister,
-                       kInterpreterBytecodeOffsetRegister, times_1, 0));
-    __ cmpb(bytecode,
-            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
-    __ j(not_equal, &not_jump_loop, Label::kNear);
-    __ LoadAddress(get_baseline_pc,
-                   ExternalReference::baseline_start_pc_for_bytecode_offset());
-    __ bind(&not_jump_loop);
+  if (!is_osr) {
+    __ cmpq(kInterpreterBytecodeOffsetRegister,
+            Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                      kFunctionEntryBytecodeOffset));
+    __ j(equal, &function_entry_bytecode);
   }
 
   __ subq(kInterpreterBytecodeOffsetRegister,
           Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
 
+  __ bind(&valid_bytecode_offset);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ PrepareCallCFunction(3);
@@ -4437,17 +4434,30 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
       FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
   __ popq(kInterpreterAccumulatorRegister);
 
-  __ jmp(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
+                         BytecodeArray::kOsrNestingLevelOffset),
+            Immediate(0));
+    Generate_OSREntry(masm, code_obj);
+  } else {
+    __ jmp(code_obj);
+  }
   __ Trap();  // Unreachable.
 
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ movq(kInterpreterBytecodeOffsetRegister,
-          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
-  __ LoadAddress(get_baseline_pc,
-                 ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ jmp(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ movq(kInterpreterBytecodeOffsetRegister, Immediate(0));
+    if (next_bytecode) {
+      __ LoadAddress(get_baseline_pc,
+                     ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ jmp(&valid_bytecode_offset);
+  }
 }
 
 }  // namespace
@@ -4460,6 +4470,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
 
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
@@ -404,6 +404,13 @@ Callable CodeFactory::InterpreterOnStackReplacement(Isolate* isolate) {
                               Builtins::kInterpreterOnStackReplacement);
 }
 
+// static
+Callable CodeFactory::InterpreterOnStackReplacement_ToBaseline(
+    Isolate* isolate) {
+  return Builtins::CallableFor(
+      isolate, Builtins::kInterpreterOnStackReplacement_ToBaseline);
+}
+
 // static
 Callable CodeFactory::ArrayNoArgumentConstructor(
     Isolate* isolate, ElementsKind kind,
@@ -92,6 +92,7 @@ class V8_EXPORT_PRIVATE CodeFactory final {
       Isolate* isolate, InterpreterPushArgsMode mode);
   static Callable InterpreterCEntry(Isolate* isolate, int result_size = 1);
   static Callable InterpreterOnStackReplacement(Isolate* isolate);
+  static Callable InterpreterOnStackReplacement_ToBaseline(Isolate* isolate);
 
   static Callable ArrayNoArgumentConstructor(
       Isolate* isolate, ElementsKind kind,
@@ -623,9 +623,9 @@ ExternalReference::address_of_enable_experimental_regexp_engine() {
 
 namespace {
 
-static uintptr_t BaselineStartPCForBytecodeOffset(Address raw_code_obj,
+static uintptr_t BaselinePCForBytecodeOffset(Address raw_code_obj,
                                                   int bytecode_offset,
                                                   Address raw_bytecode_array) {
   Code code_obj = Code::cast(Object(raw_code_obj));
   BytecodeArray bytecode_array =
       BytecodeArray::cast(Object(raw_bytecode_array));
@@ -633,22 +633,21 @@ static uintptr_t BaselineStartPCForBytecodeOffset(Address raw_code_obj,
                                                       bytecode_array);
 }
 
-static uintptr_t BaselineEndPCForBytecodeOffset(Address raw_code_obj,
+static uintptr_t BaselinePCForNextExecutedBytecode(Address raw_code_obj,
                                                 int bytecode_offset,
                                                 Address raw_bytecode_array) {
   Code code_obj = Code::cast(Object(raw_code_obj));
   BytecodeArray bytecode_array =
       BytecodeArray::cast(Object(raw_bytecode_array));
-  return code_obj.GetBaselineEndPCForBytecodeOffset(bytecode_offset,
+  return code_obj.GetBaselinePCForNextExecutedBytecode(bytecode_offset,
                                                     bytecode_array);
 }
 
 }  // namespace
 
-FUNCTION_REFERENCE(baseline_end_pc_for_bytecode_offset,
-                   BaselineEndPCForBytecodeOffset)
-FUNCTION_REFERENCE(baseline_start_pc_for_bytecode_offset,
-                   BaselineStartPCForBytecodeOffset)
+FUNCTION_REFERENCE(baseline_pc_for_bytecode_offset, BaselinePCForBytecodeOffset)
+FUNCTION_REFERENCE(baseline_pc_for_next_executed_bytecode,
+                   BaselinePCForNextExecutedBytecode)
 
 ExternalReference ExternalReference::thread_in_wasm_flag_address_address(
     Isolate* isolate) {
@@ -124,8 +124,9 @@ class StatsCounter;
   V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33")                \
   V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55")                \
   V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001")             \
-  V(baseline_start_pc_for_bytecode_offset, "BaselineStartPCForBytecodeOffset") \
-  V(baseline_end_pc_for_bytecode_offset, "BaselineEndPCForBytecodeOffset")    \
+  V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset")           \
+  V(baseline_pc_for_next_executed_bytecode,                                   \
+    "BaselinePCForNextExecutedBytecode")                                      \
   V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address")    \
   V(check_object_type, "check_object_type")                                   \
   V(compute_integer_hash, "ComputeSeededHash")                                \
@@ -1330,6 +1330,34 @@ void InterpreterAssembler::MaybeDropFrames(TNode<Context> context) {
   BIND(&ok);
 }
 
+void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
+                                              TNode<IntPtrT> relative_jump) {
+  TNode<JSFunction> function = CAST(LoadRegister(Register::function_closure()));
+  TNode<HeapObject> shared_info = LoadJSFunctionSharedFunctionInfo(function);
+  TNode<Object> sfi_data =
+      LoadObjectField(shared_info, SharedFunctionInfo::kFunctionDataOffset);
+  TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
+
+  Label baseline(this);
+  GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
+  {
+    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
+    CallStub(callable, context);
+    JumpBackward(relative_jump);
+  }
+
+  BIND(&baseline);
+  {
+    Callable callable =
+        CodeFactory::InterpreterOnStackReplacement_ToBaseline(isolate());
+    // We already compiled the baseline code, so we don't need to handle failed
+    // compilation as in the Ignition -> Turbofan case. Therefore we can just
+    // tailcall to the OSR builtin.
+    SaveBytecodeOffset();
+    TailCallStub(callable, context);
+  }
+}
+
 void InterpreterAssembler::TraceBytecode(Runtime::FunctionId function_id) {
   CallRuntime(function_id, GetContext(), BytecodeArrayTaggedPointer(),
               SmiTag(BytecodeOffset()), GetAccumulatorUnchecked());
@@ -244,6 +244,9 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
   // Dispatch to frame dropper trampoline if necessary.
   void MaybeDropFrames(TNode<Context> context);
 
+  // Perform OnStackReplacement.
+  void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
+
   // Returns the offset from the BytecodeArrayPointer of the current bytecode.
   TNode<IntPtrT> BytecodeOffset();
 
@@ -2203,11 +2203,7 @@ IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
   JumpBackward(relative_jump);
 
   BIND(&osr_armed);
-  {
-    Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
-    CallStub(callable, context);
-    JumpBackward(relative_jump);
-  }
+  OnStackReplacement(context, relative_jump);
 }
 
 // SwitchOnSmiNoFeedback <table_start> <table_length> <case_value_base>
@@ -411,6 +411,26 @@ uintptr_t Code::GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
                                                   bytecodes);
 }
 
+uintptr_t Code::GetBaselinePCForNextExecutedBytecode(int bytecode_offset,
+                                                     BytecodeArray bytecodes) {
+  DisallowGarbageCollection no_gc;
+  CHECK_EQ(kind(), CodeKind::BASELINE);
+  baseline::BytecodeOffsetIterator offset_iterator(
+      ByteArray::cast(bytecode_offset_table()), bytecodes);
+  Handle<BytecodeArray> bytecodes_handle(
+      reinterpret_cast<Address*>(&bytecodes));
+  interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes_handle,
+                                                       bytecode_offset);
+  interpreter::Bytecode bytecode = bytecode_iterator.current_bytecode();
+  if (bytecode == interpreter::Bytecode::kJumpLoop) {
+    return GetBaselineStartPCForBytecodeOffset(
+        bytecode_iterator.GetJumpTargetOffset(), bytecodes);
+  } else {
+    DCHECK(!interpreter::Bytecodes::IsJump(bytecode));
+    return GetBaselineEndPCForBytecodeOffset(bytecode_offset, bytecodes);
+  }
+}
+
 void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
                             bool is_off_heap_trampoline) {
   CHECK(0 <= stack_slots && stack_slots < StackSlotsField::kMax);
@@ -412,6 +412,14 @@ class Code : public HeapObject {
   inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
                                                      BytecodeArray bytecodes);
 
+  // Returns the PC of the next bytecode in execution order.
+  // If the bytecode at the given offset is JumpLoop, the PC of the jump target
+  // is returned. Other jumps are not allowed.
+  // For other bytecodes this is equivalent to
+  // GetBaselineEndPCForBytecodeOffset.
+  inline uintptr_t GetBaselinePCForNextExecutedBytecode(
+      int bytecode_offset, BytecodeArray bytecodes);
+
   inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
                                             BytecodeArray bytecodes);
 
@@ -337,14 +337,30 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromBytecode) {
                             function->shared().is_compiled_scope(isolate));
   JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
   DCHECK(is_compiled_scope.is_compiled());
-  if (FLAG_sparkplug) {
-    Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
-                              &is_compiled_scope);
-  }
   // Also initialize the invocation count here. This is only really needed for
   // OSR. When we OSR functions with lazy feedback allocation we want to have
   // a non zero invocation count so we can inline functions.
   function->feedback_vector().set_invocation_count(1);
+  if (FLAG_sparkplug) {
+    if (Compiler::CompileBaseline(isolate, function,
+                                  Compiler::CLEAR_EXCEPTION,
+                                  &is_compiled_scope)) {
+      if (FLAG_use_osr) {
+        JavaScriptFrameIterator it(isolate);
+        DCHECK(it.frame()->is_unoptimized());
+        UnoptimizedFrame* frame = UnoptimizedFrame::cast(it.frame());
+        if (FLAG_trace_osr) {
+          CodeTracer::Scope scope(isolate->GetCodeTracer());
+          PrintF(
+              scope.file(),
+              "[OSR - Entry at OSR bytecode offset %d into baseline code]\n",
+              frame->GetBytecodeOffset());
+        }
+        frame->GetBytecodeArray().set_osr_loop_nesting_level(
+            AbstractCode::kMaxLoopNestingMarker);
+      }
+    }
+  }
   return ReadOnlyRoots(isolate).undefined_value();
 }
 {