Reland "[sparkplug][deoptimizer] Deoptimize to baseline."

This is a reland of bdcd7d79d3.

Handle lazy deopts when the current bytecode is JumpLoop.
Instead of advancing to the next bytecode, re-execute the JumpLoop.

TBR=jgruber@chromium.org, neis@chromium.org

Original change's description:
> [sparkplug][deoptimizer] Deoptimize to baseline.
>
> If we have baseline code, deoptimize to baseline instead of the
> interpreter. The process is similar to deopting to the interpreter.
> We just use different builtins
> (BaselineEnterAtBytecode/BaselineEnterAtNextBytecode) instead of
> InterpreterEnterBytecodeDispatch/InterpreterEnterBytecodeAdvance, that
> patch an interpreter frame to a baseline frame and continue execution in
> baseline code (based on the deopt type, at the current or next
> bytecode).
>
> Bug: v8:11420
> Change-Id: Iabaefb36c05155a435c7b380906a86d9b9d549fa
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2695591
> Commit-Queue: Patrick Thier <pthier@chromium.org>
> Reviewed-by: Leszek Swirski <leszeks@chromium.org>
> Reviewed-by: Georg Neis <neis@chromium.org>
> Reviewed-by: Jakob Gruber <jgruber@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#73609}
Bug: v8:11420
Change-Id: Ib8cac028121188ddc23ff29377760ed684eb7392
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2783035
Reviewed-by: Patrick Thier <pthier@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Patrick Thier <pthier@chromium.org>
Cr-Commit-Position: refs/heads/master@{#73636}
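
At a glance, the continuation-builtin choice this CL introduces — a simplified restatement of the DispatchBuiltinFor helper from the deoptimizer hunks below, not the literal source:

// Pick the builtin the deoptimizer resumes in for an unoptimized frame.
// With baseline code present we re-enter baseline instead of the
// interpreter; the *AtNextBytecode variant additionally re-executes
// JumpLoop instead of advancing past it (the fix this reland adds).
Builtins::Name DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
  if (is_baseline) {
    return advance_bc ? Builtins::kBaselineEnterAtNextBytecode
                      : Builtins::kBaselineEnterAtBytecode;
  }
  return advance_bc ? Builtins::kInterpreterEnterBytecodeAdvance
                    : Builtins::kInterpreterEnterBytecodeDispatch;
}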
parent 7d844bdbb0
commit e3ccb53877
@@ -92,6 +92,10 @@ void BaselineAssembler::Bind(Label* label) {
  __ bind(label);
}

void BaselineAssembler::JumpTarget() {
  // NOP on arm.
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ b(target);
}
@@ -87,6 +87,8 @@ void BaselineAssembler::Bind(Label* label) {
  __ BindJumpTarget(label);
}

void BaselineAssembler::JumpTarget() { __ JumpTarget(); }

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ B(target);
}
@@ -40,6 +40,9 @@ class BaselineAssembler {
  inline void DebugBreak();

  inline void Bind(Label* label);
  // Marks the current position as a valid jump target on CFI enabled
  // architectures.
  inline void JumpTarget();
  inline void JumpIf(Condition cc, Label* target,
                     Label::Distance distance = Label::kFar);
  inline void Jump(Label* target, Label::Distance distance = Label::kFar);
@@ -426,6 +426,7 @@ void BaselineCompiler::PreVisitSingleBytecode() {

void BaselineCompiler::VisitSingleBytecode() {
  int offset = iterator().current_offset();
  bool is_marked_as_jump_target = false;
  if (labels_[offset]) {
    // Bind labels for this offset that have already been linked to a
    // jump (i.e. forward jumps, excluding jump tables).
@@ -436,15 +437,23 @@ void BaselineCompiler::VisitSingleBytecode() {
    labels_[offset]->linked.Clear();
#endif
    __ Bind(&labels_[offset]->unlinked);
    is_marked_as_jump_target = true;
  }

  // Record positions of exception handlers.
  if (iterator().current_offset() == *next_handler_offset_) {
    __ ExceptionHandler();
    next_handler_offset_++;
    is_marked_as_jump_target = true;
  }
  DCHECK_LT(iterator().current_offset(), *next_handler_offset_);

  // Mark position as valid jump target, if it isn't one already.
  // This is required for the deoptimizer, when CFI is enabled.
  if (!is_marked_as_jump_target) {
    __ JumpTarget();
  }

  if (FLAG_code_comments) {
    std::ostringstream str;
    str << "[ ";
@@ -90,6 +90,10 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::JumpTarget() {
  // NOP on ia32.
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
@@ -93,6 +93,10 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {

void BaselineAssembler::Bind(Label* label) { __ bind(label); }

void BaselineAssembler::JumpTarget() {
  // NOP on x64.
}

void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
@@ -3462,6 +3462,125 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

namespace {

// Converts an interpreter frame into a baseline frame and continues execution
// in baseline code (baseline code has to exist on the shared function info),
// either at the start or the end of the current bytecode.
void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ ldr(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Get function from the frame.
  Register closure = r1;
  __ ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

  // Replace BytecodeOffset with the feedback vector.
  Register feedback_vector = r2;
  __ ldr(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ ldr(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  if (__ emit_debug_code()) {
    Register scratch = r3;
    __ CompareObjectType(feedback_vector, scratch, scratch,
                         FEEDBACK_VECTOR_TYPE);
    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
  }
  __ str(feedback_vector,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  feedback_vector = no_reg;

  // Get the Code object from the shared function info.
  Register code_obj = r4;
  __ ldr(code_obj,
         FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ ldr(code_obj,
         FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
  __ ldr(code_obj,
         FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
  closure = no_reg;

  // Compute baseline pc for bytecode offset.
  __ Push(kInterpreterAccumulatorRegister);
  ExternalReference get_baseline_pc_extref =
      next_bytecode
          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
          : ExternalReference::baseline_start_pc_for_bytecode_offset();
  Register get_baseline_pc = r3;
  __ Move(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  __ cmp(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                 kFunctionEntryBytecodeOffset));
  __ b(eq, &function_entry_bytecode);
  __ bind(&valid_bytecode_offset);

  // In the case we advance the BC, check if the current bytecode is JumpLoop.
  // If it is, re-execute it instead of continuing at the next bytecode.
  if (next_bytecode) {
    Label not_jump_loop;
    Register bytecode = r1;
    __ ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
                                 kInterpreterBytecodeOffsetRegister));
    __ cmp(bytecode,
           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
    __ b(ne, &not_jump_loop);
    __ Move(get_baseline_pc,
            ExternalReference::baseline_start_pc_for_bytecode_offset());
    __ bind(&not_jump_loop);
  }

  __ sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3, 0, r0);
    Register arg_reg_1 = r0;
    Register arg_reg_2 = r1;
    Register arg_reg_3 = r2;
    __ mov(arg_reg_1, code_obj);
    __ mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ add(code_obj, code_obj, Operand(Code::kHeaderSize - kHeapObjectTag));
  __ add(code_obj, code_obj, kReturnRegister0);
  __ Pop(kInterpreterAccumulatorRegister);

  __ Jump(code_obj);
  __ Trap();  // Unreachable.

  __ bind(&function_entry_bytecode);
  // If the bytecode offset is kFunctionEntryOffset, get the start address of
  // the first bytecode.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ Move(get_baseline_pc,
          ExternalReference::baseline_start_pc_for_bytecode_offset());
  __ b(&valid_bytecode_offset);
}

}  // namespace

void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, false);
}

void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, true);
}

void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::INTERNAL);
@@ -3969,6 +3969,127 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

namespace {

// Converts an interpreter frame into a baseline frame and continues execution
// in baseline code (baseline code has to exist on the shared function info),
// either at the start or the end of the current bytecode.
void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ Ldr(kInterpreterBytecodeArrayRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  // Get function from the frame.
  Register closure = x1;
  __ Ldr(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));

  // Replace BytecodeOffset with the feedback vector.
  Register feedback_vector = x2;
  __ LoadTaggedPointerField(
      feedback_vector,
      FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(
      feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
  if (__ emit_debug_code()) {
    Register scratch = x3;
    __ CompareObjectType(feedback_vector, scratch, scratch,
                         FEEDBACK_VECTOR_TYPE);
    __ Assert(eq, AbortReason::kExpectedFeedbackVector);
  }
  __ Str(feedback_vector,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  feedback_vector = no_reg;

  // Get the Code object from the shared function info.
  Register code_obj = x22;
  __ LoadTaggedPointerField(
      code_obj,
      FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
      code_obj,
      FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
  __ LoadTaggedPointerField(
      code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
  closure = no_reg;

  // Compute baseline pc for bytecode offset.
  __ Push(padreg, kInterpreterAccumulatorRegister);
  ExternalReference get_baseline_pc_extref =
      next_bytecode
          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
          : ExternalReference::baseline_start_pc_for_bytecode_offset();
  Register get_baseline_pc = x3;
  __ Mov(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  __ cmp(kInterpreterBytecodeOffsetRegister,
         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
                 kFunctionEntryBytecodeOffset));
  __ B(eq, &function_entry_bytecode);
  __ bind(&valid_bytecode_offset);

  // In the case we advance the BC, check if the current bytecode is JumpLoop.
  // If it is, re-execute it instead of continuing at the next bytecode.
  if (next_bytecode) {
    Label not_jump_loop;
    Register bytecode = x1;
    __ Ldrb(bytecode, MemOperand(kInterpreterBytecodeArrayRegister,
                                 kInterpreterBytecodeOffsetRegister));
    __ Cmp(bytecode,
           Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
    __ B(ne, &not_jump_loop);
    __ Mov(get_baseline_pc,
           ExternalReference::baseline_start_pc_for_bytecode_offset());
    __ bind(&not_jump_loop);
  }

  __ Sub(kInterpreterBytecodeOffsetRegister, kInterpreterBytecodeOffsetRegister,
         (BytecodeArray::kHeaderSize - kHeapObjectTag));

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    Register arg_reg_1 = x0;
    Register arg_reg_2 = x1;
    Register arg_reg_3 = x2;
    __ Mov(arg_reg_1, code_obj);
    __ Mov(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ Mov(arg_reg_3, kInterpreterBytecodeArrayRegister);
    __ CallCFunction(get_baseline_pc, 3, 0);
  }
  __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
  __ Add(code_obj, code_obj, kReturnRegister0);
  __ Pop(kInterpreterAccumulatorRegister, padreg);

  __ Jump(code_obj);
  __ Trap();  // Unreachable.

  __ bind(&function_entry_bytecode);
  // If the bytecode offset is kFunctionEntryOffset, get the start address of
  // the first bytecode.
  __ Mov(kInterpreterBytecodeOffsetRegister,
         BytecodeArray::kHeaderSize - kHeapObjectTag);
  __ Mov(get_baseline_pc,
         ExternalReference::baseline_start_pc_for_bytecode_offset());
  __ B(&valid_bytecode_offset);
}

}  // namespace

void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, false);
}

void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, true);
}

void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::INTERNAL);
@@ -141,6 +141,8 @@ namespace internal {
  ASM(BaselineOutOfLinePrologue, BaselineOutOfLinePrologue) \
  ASM(BaselineOnStackReplacement, ContextOnly) \
  ASM(BaselineLeaveFrame, BaselineLeaveFrame) \
  ASM(BaselineEnterAtBytecode, Void) \
  ASM(BaselineEnterAtNextBytecode, Void) \
  \
  /* Code life-cycle */ \
  TFC(CompileLazy, JSTrampoline) \
@@ -4073,6 +4073,128 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

namespace {

// Converts an interpreter frame into a baseline frame and continues execution
// in baseline code (baseline code has to exist on the shared function info),
// either at the start or the end of the current bytecode.
void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ mov(kInterpreterBytecodeArrayRegister,
         MemOperand(ebp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ mov(kInterpreterBytecodeOffsetRegister,
         MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
  __ SmiUntag(kInterpreterBytecodeOffsetRegister);

  // Get function from the frame.
  Register closure = esi;
  __ mov(closure, MemOperand(ebp, StandardFrameConstants::kFunctionOffset));

  // Replace BytecodeOffset with the feedback vector.
  Register feedback_vector = ecx;
  __ mov(feedback_vector,
         FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ mov(feedback_vector, FieldOperand(feedback_vector, Cell::kValueOffset));
  if (__ emit_debug_code()) {
    __ push(kInterpreterAccumulatorRegister);
    Register scratch = kInterpreterAccumulatorRegister;
    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, scratch);
    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
    scratch = no_reg;
    __ pop(kInterpreterAccumulatorRegister);
  }
  __ mov(MemOperand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
         feedback_vector);
  feedback_vector = no_reg;

  // Get the Code object from the shared function info.
  Register code_obj = closure;
  __ mov(code_obj,
         FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  closure = no_reg;
  __ mov(code_obj,
         FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
  __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));

  // Compute baseline pc for bytecode offset.
  __ push(kInterpreterAccumulatorRegister);
  ExternalReference get_baseline_pc_extref =
      next_bytecode
          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
          : ExternalReference::baseline_start_pc_for_bytecode_offset();
  Register get_baseline_pc = ecx;
  __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  __ cmp(kInterpreterBytecodeOffsetRegister,
         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
                   kFunctionEntryBytecodeOffset));
  __ j(equal, &function_entry_bytecode);
  __ bind(&valid_bytecode_offset);

  // In the case we advance the BC, check if the current bytecode is JumpLoop.
  // If it is, re-execute it instead of continuing at the next bytecode.
  if (next_bytecode) {
    Label not_jump_loop;
    __ push(kInterpreterBytecodeOffsetRegister);
    Register bytecode = kInterpreterBytecodeOffsetRegister;
    __ movzx_b(bytecode,
               Operand(kInterpreterBytecodeArrayRegister,
                       kInterpreterBytecodeOffsetRegister, times_1, 0));
    __ cmpb(bytecode,
            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
    __ j(not_equal, &not_jump_loop, Label::kNear);
    __ LoadAddress(get_baseline_pc,
                   ExternalReference::baseline_start_pc_for_bytecode_offset());
    __ bind(&not_jump_loop);
    __ pop(kInterpreterBytecodeOffsetRegister);
  }

  __ sub(kInterpreterBytecodeOffsetRegister,
         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3, eax);
    __ mov(Operand(esp, 0 * kSystemPointerSize), code_obj);
    __ mov(Operand(esp, 1 * kSystemPointerSize),
           kInterpreterBytecodeOffsetRegister);
    __ mov(Operand(esp, 2 * kSystemPointerSize),
           kInterpreterBytecodeArrayRegister);
    __ CallCFunction(get_baseline_pc, 3);
  }
  __ lea(code_obj,
         FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
  __ pop(kInterpreterAccumulatorRegister);

  __ jmp(code_obj);
  __ Trap();  // Unreachable.

  __ bind(&function_entry_bytecode);
  // If the bytecode offset is kFunctionEntryOffset, get the start address of
  // the first bytecode.
  __ mov(kInterpreterBytecodeOffsetRegister,
         Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ LoadAddress(get_baseline_pc,
                 ExternalReference::baseline_start_pc_for_bytecode_offset());
  __ jmp(&valid_bytecode_offset);
}

}  // namespace

void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, false);
}

void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, true);
}

void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::INTERNAL);
@@ -4343,6 +4343,123 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
  Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
}

namespace {

// Converts an interpreter frame into a baseline frame and continues execution
// in baseline code (baseline code has to exist on the shared function info),
// either at the start or the end of the current bytecode.
void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
  // Get bytecode array and bytecode offset from the stack frame.
  __ movq(kInterpreterBytecodeArrayRegister,
          MemOperand(rbp, InterpreterFrameConstants::kBytecodeArrayFromFp));
  __ SmiUntag(
      kInterpreterBytecodeOffsetRegister,
      MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp));

  // Get function from the frame.
  Register closure = rdi;
  __ movq(closure, MemOperand(rbp, StandardFrameConstants::kFunctionOffset));

  // Replace BytecodeOffset with the feedback vector.
  Register feedback_vector = rbx;
  __ LoadTaggedPointerField(
      feedback_vector, FieldOperand(closure, JSFunction::kFeedbackCellOffset));
  __ LoadTaggedPointerField(feedback_vector,
                            FieldOperand(feedback_vector, Cell::kValueOffset));
  if (__ emit_debug_code()) {
    __ CmpObjectType(feedback_vector, FEEDBACK_VECTOR_TYPE, kScratchRegister);
    __ Assert(equal, AbortReason::kExpectedFeedbackVector);
  }
  __ movq(MemOperand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp),
          feedback_vector);
  feedback_vector = no_reg;

  // Get the Code object from the shared function info.
  Register code_obj = rbx;
  __ LoadTaggedPointerField(
      code_obj, FieldOperand(closure, JSFunction::kSharedFunctionInfoOffset));
  __ LoadTaggedPointerField(
      code_obj,
      FieldOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
  __ LoadTaggedPointerField(
      code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
  closure = no_reg;

  // Compute baseline pc for bytecode offset.
  __ pushq(kInterpreterAccumulatorRegister);
  ExternalReference get_baseline_pc_extref =
      next_bytecode
          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
          : ExternalReference::baseline_start_pc_for_bytecode_offset();
  Register get_baseline_pc = rax;
  __ LoadAddress(get_baseline_pc, get_baseline_pc_extref);

  // If the code deoptimizes during the implicit function entry stack interrupt
  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
  // not a valid bytecode offset.
  // TODO(pthier): Investigate if it is feasible to handle this special case
  // in TurboFan instead of here.
  Label valid_bytecode_offset, function_entry_bytecode;
  __ cmpq(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag +
                    kFunctionEntryBytecodeOffset));
  __ j(equal, &function_entry_bytecode);
  __ bind(&valid_bytecode_offset);

  // In the case we advance the BC, check if the current bytecode is JumpLoop.
  // If it is, re-execute it instead of continuing at the next bytecode.
  if (next_bytecode) {
    Label not_jump_loop;
    Register bytecode = rdi;
    __ movzxbq(bytecode,
               Operand(kInterpreterBytecodeArrayRegister,
                       kInterpreterBytecodeOffsetRegister, times_1, 0));
    __ cmpb(bytecode,
            Immediate(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
    __ j(not_equal, &not_jump_loop, Label::kNear);
    __ LoadAddress(get_baseline_pc,
                   ExternalReference::baseline_start_pc_for_bytecode_offset());
    __ bind(&not_jump_loop);
  }

  __ subq(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));

  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ PrepareCallCFunction(3);
    __ movq(arg_reg_1, code_obj);
    __ movq(arg_reg_2, kInterpreterBytecodeOffsetRegister);
    __ movq(arg_reg_3, kInterpreterBytecodeArrayRegister);
    __ CallCFunction(get_baseline_pc, 3);
  }
  __ leaq(code_obj,
          FieldOperand(code_obj, kReturnRegister0, times_1, Code::kHeaderSize));
  __ popq(kInterpreterAccumulatorRegister);

  __ jmp(code_obj);
  __ Trap();  // Unreachable.

  __ bind(&function_entry_bytecode);
  // If the bytecode offset is kFunctionEntryOffset, get the start address of
  // the first bytecode.
  __ movq(kInterpreterBytecodeOffsetRegister,
          Immediate(BytecodeArray::kHeaderSize - kHeapObjectTag));
  __ LoadAddress(get_baseline_pc,
                 ExternalReference::baseline_start_pc_for_bytecode_offset());
  __ jmp(&valid_bytecode_offset);
}

}  // namespace

void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, false);
}

void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
  Generate_BaselineEntry(masm, true);
}

void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
  FrameScope scope(masm, StackFrame::MANUAL);
  __ EnterFrame(StackFrame::INTERNAL);
@@ -621,6 +621,35 @@ ExternalReference::address_of_enable_experimental_regexp_engine() {
  return ExternalReference(&FLAG_enable_experimental_regexp_engine);
}

namespace {

static uintptr_t BaselineStartPCForBytecodeOffset(Address raw_code_obj,
                                                  int bytecode_offset,
                                                  Address raw_bytecode_array) {
  Code code_obj = Code::cast(Object(raw_code_obj));
  BytecodeArray bytecode_array =
      BytecodeArray::cast(Object(raw_bytecode_array));
  return code_obj.GetBaselineStartPCForBytecodeOffset(bytecode_offset,
                                                      bytecode_array);
}

static uintptr_t BaselineEndPCForBytecodeOffset(Address raw_code_obj,
                                                int bytecode_offset,
                                                Address raw_bytecode_array) {
  Code code_obj = Code::cast(Object(raw_code_obj));
  BytecodeArray bytecode_array =
      BytecodeArray::cast(Object(raw_bytecode_array));
  return code_obj.GetBaselineEndPCForBytecodeOffset(bytecode_offset,
                                                    bytecode_array);
}

}  // namespace

FUNCTION_REFERENCE(baseline_end_pc_for_bytecode_offset,
                   BaselineEndPCForBytecodeOffset)
FUNCTION_REFERENCE(baseline_start_pc_for_bytecode_offset,
                   BaselineStartPCForBytecodeOffset)

ExternalReference ExternalReference::thread_in_wasm_flag_address_address(
    Isolate* isolate) {
  return ExternalReference(isolate->thread_in_wasm_flag_address_address());
@@ -124,6 +124,8 @@ class StatsCounter;
  V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
  V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
  V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
  V(baseline_start_pc_for_bytecode_offset, "BaselineStartPCForBytecodeOffset") \
  V(baseline_end_pc_for_bytecode_offset, "BaselineEndPCForBytecodeOffset") \
  V(bytecode_size_table_address, "Bytecodes::bytecode_size_table_address") \
  V(check_object_type, "check_object_type") \
  V(compute_integer_hash, "ComputeSeededHash") \
@@ -14,6 +14,8 @@ void Builtins_ContinueToJavaScriptBuiltinWithResult();
void Builtins_ContinueToJavaScriptBuiltin();
void construct_stub_create_deopt_addr();
void construct_stub_invoke_deopt_addr();
void Builtins_BaselineEnterAtBytecode();
void Builtins_BaselineEnterAtNextBytecode();
typedef void (*function_ptr)();
}
@@ -30,6 +32,8 @@ constexpr function_ptr builtins[] = {
    &Builtins_ContinueToJavaScriptBuiltin,
    &construct_stub_create_deopt_addr,
    &construct_stub_invoke_deopt_addr,
    &Builtins_BaselineEnterAtBytecode,
    &Builtins_BaselineEnterAtNextBytecode,
};

bool Deoptimizer::IsValidReturnAddress(Address address) {
@@ -991,11 +991,25 @@ void Deoptimizer::DoComputeOutputFrames() {
        stack_guard->real_jslimit() - kStackLimitSlackForDeoptimizationInBytes);
}

namespace {

// Get the dispatch builtin for unoptimized frames.
Builtins::Name DispatchBuiltinFor(bool is_baseline, bool advance_bc) {
  if (is_baseline) {
    return advance_bc ? Builtins::kBaselineEnterAtNextBytecode
                      : Builtins::kBaselineEnterAtBytecode;
  } else {
    return advance_bc ? Builtins::kInterpreterEnterBytecodeAdvance
                      : Builtins::kInterpreterEnterBytecodeDispatch;
  }
}

}  // namespace

void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
                                            int frame_index,
                                            bool goto_catch_handler) {
  SharedFunctionInfo shared = translated_frame->raw_shared_info();

  TranslatedFrame::iterator value_iterator = translated_frame->begin();
  const bool is_bottommost = (0 == frame_index);
  const bool is_topmost = (output_count_ - 1 == frame_index);
@@ -1020,15 +1034,10 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
  const uint32_t output_frame_size = frame_info.frame_size_in_bytes();

  TranslatedFrame::iterator function_iterator = value_iterator++;
-  if (verbose_tracing_enabled()) {
-    PrintF(trace_scope()->file(), "  translating unoptimized frame ");
-    std::unique_ptr<char[]> name = shared.DebugNameCStr();
-    PrintF(trace_scope()->file(), "%s", name.get());
-    PrintF(trace_scope()->file(),
-           " => bytecode_offset=%d, variable_frame_size=%d, frame_size=%d%s\n",
-           real_bytecode_offset, frame_info.frame_size_in_bytes_without_fixed(),
-           output_frame_size, goto_catch_handler ? " (throw)" : "");
-  }
+  BytecodeArray bytecode_array =
+      shared.HasBreakInfo() ? shared.GetDebugInfo().DebugBytecodeArray()
+                            : shared.GetBytecodeArray(isolate());

  // Allocate and store the output frame description.
  FrameDescription* output_frame = new (output_frame_size)
@@ -1039,6 +1048,34 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
  CHECK_NULL(output_[frame_index]);
  output_[frame_index] = output_frame;

  // Compute this frame's PC and state.
  // For interpreted frames, the PC will be a special builtin that
  // continues the bytecode dispatch. Note that non-topmost and lazy-style
  // bailout handlers also advance the bytecode offset before dispatch, hence
  // simulating what normal handlers do upon completion of the operation.
  // For baseline frames, the PC will be a builtin to convert the interpreter
  // frame to a baseline frame before continuing execution of baseline code.
  // We can't directly continue into baseline code, because of CFI.
  Builtins* builtins = isolate_->builtins();
  const bool advance_bc =
      (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
      !goto_catch_handler;
  const bool is_baseline = shared.HasBaselineData();
  Code dispatch_builtin =
      builtins->builtin(DispatchBuiltinFor(is_baseline, advance_bc));

  if (verbose_tracing_enabled()) {
    PrintF(trace_scope()->file(), "  translating %s frame ",
           is_baseline ? "baseline" : "interpreted");
    std::unique_ptr<char[]> name = shared.DebugNameCStr();
    PrintF(trace_scope()->file(), "%s", name.get());
    PrintF(trace_scope()->file(), " => bytecode_offset=%d, ",
           real_bytecode_offset);
    PrintF(trace_scope()->file(), "variable_frame_size=%d, frame_size=%d%s\n",
           frame_info.frame_size_in_bytes_without_fixed(), output_frame_size,
           goto_catch_handler ? " (throw)" : "");
  }

  // The top address of the frame is computed from the previous frame's top and
  // this frame's size.
  const intptr_t top_address =
@@ -1145,9 +1182,6 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
  frame_writer.PushRawValue(argc, "actual argument count\n");

  // Set the bytecode array pointer.
-  Object bytecode_array = shared.HasBreakInfo()
-                              ? shared.GetDebugInfo().DebugBytecodeArray()
-                              : shared.GetBytecodeArray(isolate());
  frame_writer.PushRawObject(bytecode_array, "bytecode array\n");

  // The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
@@ -1237,26 +1271,16 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
  CHECK_EQ(translated_frame->end(), value_iterator);
  CHECK_EQ(0u, frame_writer.top_offset());

-  // Compute this frame's PC and state. The PC will be a special builtin that
-  // continues the bytecode dispatch. Note that non-topmost and lazy-style
-  // bailout handlers also advance the bytecode offset before dispatch, hence
-  // simulating what normal handlers do upon completion of the operation.
-  Builtins* builtins = isolate_->builtins();
-  Code dispatch_builtin =
-      (!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
-              !goto_catch_handler
-          ? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
-          : builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
+  const intptr_t pc =
+      static_cast<intptr_t>(dispatch_builtin.InstructionStart());
  if (is_topmost) {
    // Only the pc of the topmost frame needs to be signed since it is
    // authenticated at the end of the DeoptimizationEntry builtin.
    const intptr_t top_most_pc = PointerAuthentication::SignAndCheckPC(
-        static_cast<intptr_t>(dispatch_builtin.InstructionStart()),
-        frame_writer.frame()->GetTop());
+        pc, frame_writer.frame()->GetTop());
    output_frame->SetPc(top_most_pc);
  } else {
-    output_frame->SetPc(
-        static_cast<intptr_t>(dispatch_builtin.InstructionStart()));
+    output_frame->SetPc(pc);
  }

  // Update constant pool.
@@ -225,16 +225,13 @@ namespace {

bool IsInterpreterFramePc(Isolate* isolate, Address pc,
                          StackFrame::State* state) {
-  Code interpreter_entry_trampoline =
-      isolate->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
-  Code interpreter_bytecode_advance =
-      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeAdvance);
-  Code interpreter_bytecode_dispatch =
-      isolate->builtins()->builtin(Builtins::kInterpreterEnterBytecodeDispatch);

-  if (interpreter_entry_trampoline.contains(isolate, pc) ||
-      interpreter_bytecode_advance.contains(isolate, pc) ||
-      interpreter_bytecode_dispatch.contains(isolate, pc)) {
+  Builtins::Name builtin_index = InstructionStream::TryLookupCode(isolate, pc);
+  if (builtin_index != Builtins::kNoBuiltinId &&
+      (builtin_index == Builtins::kInterpreterEntryTrampoline ||
+       builtin_index == Builtins::kInterpreterEnterBytecodeAdvance ||
+       builtin_index == Builtins::kInterpreterEnterBytecodeDispatch ||
+       builtin_index == Builtins::kBaselineEnterAtBytecode ||
+       builtin_index == Builtins::kBaselineEnterAtNextBytecode)) {
    return true;
  } else if (FLAG_interpreted_frames_native_stack) {
    intptr_t marker = Memory<intptr_t>(
@@ -251,7 +248,7 @@ bool IsInterpreterFramePc(Isolate* isolate, Address pc,
  } else if (!isolate->heap()->InSpaceSlow(pc, CODE_SPACE)) {
    return false;
  }
-  interpreter_entry_trampoline =
+  Code interpreter_entry_trampoline =
      isolate->heap()->GcSafeFindCodeForInnerPointer(pc);
  return interpreter_entry_trampoline.is_interpreter_trampoline_builtin();
} else {
@@ -595,7 +592,10 @@ StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
  switch (code_obj.kind()) {
    case CodeKind::BUILTIN:
      if (StackFrame::IsTypeMarker(marker)) break;
-      if (code_obj.is_interpreter_trampoline_builtin()) {
+      if (code_obj.is_interpreter_trampoline_builtin() ||
+          // Frames for baseline entry trampolines on the stack are still
+          // interpreted frames.
+          code_obj.is_baseline_trampoline_builtin()) {
        return INTERPRETED;
      }
      if (code_obj.is_baseline_leave_frame_builtin()) {
@@ -1838,8 +1838,8 @@ int BaselineFrame::GetBytecodeOffset() const {
}

intptr_t BaselineFrame::GetPCForBytecodeOffset(int bytecode_offset) const {
-  return LookupCode().GetBaselinePCForBytecodeOffset(bytecode_offset,
-                                                     GetBytecodeArray());
+  return LookupCode().GetBaselineStartPCForBytecodeOffset(bytecode_offset,
+                                                          GetBytecodeArray());
}

void BaselineFrame::PatchContext(Context value) {
@@ -371,7 +371,7 @@ CodeKind Code::kind() const {
int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc,
                                         BytecodeArray bytecodes) {
  DisallowGarbageCollection no_gc;
-  CHECK(!is_baseline_prologue_builtin());
+  CHECK(!is_baseline_trampoline_builtin());
  if (is_baseline_leave_frame_builtin()) return kFunctionExitBytecodeOffset;
  CHECK_EQ(kind(), CodeKind::BASELINE);
  baseline::BytecodeOffsetIterator offset_iterator(
@@ -382,13 +382,33 @@ int Code::GetBytecodeOffsetForBaselinePC(Address baseline_pc,
}

uintptr_t Code::GetBaselinePCForBytecodeOffset(int bytecode_offset,
+                                               BytecodeToPCPosition position,
                                               BytecodeArray bytecodes) {
  DisallowGarbageCollection no_gc;
  CHECK_EQ(kind(), CodeKind::BASELINE);
  baseline::BytecodeOffsetIterator offset_iterator(
      ByteArray::cast(bytecode_offset_table()), bytecodes);
  offset_iterator.AdvanceToBytecodeOffset(bytecode_offset);
-  return offset_iterator.current_pc_start_offset();
+  uintptr_t pc = 0;
+  if (position == kPcAtStartOfBytecode) {
+    pc = offset_iterator.current_pc_start_offset();
+  } else {
+    DCHECK_EQ(position, kPcAtEndOfBytecode);
+    pc = offset_iterator.current_pc_end_offset();
+  }
+  return pc;
}

+uintptr_t Code::GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
+                                                    BytecodeArray bytecodes) {
+  return GetBaselinePCForBytecodeOffset(bytecode_offset, kPcAtStartOfBytecode,
+                                        bytecodes);
+}
+
+uintptr_t Code::GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
+                                                  BytecodeArray bytecodes) {
+  return GetBaselinePCForBytecodeOffset(bytecode_offset, kPcAtEndOfBytecode,
+                                        bytecodes);
+}
+
void Code::initialize_flags(CodeKind kind, bool is_turbofanned, int stack_slots,
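
The split into start and end PC mirrors the two deopt flavors; the end PC of a bytecode is by construction the start PC of its successor. A hedged usage sketch (code, offset, and bytecodes are hypothetical locals, not part of this diff):

// Eager deopt (or topmost re-execution): continue at the bytecode's start.
uintptr_t redo_pc = code.GetBaselineStartPCForBytecodeOffset(offset, bytecodes);
// Lazy deopt or non-topmost frame: continue right after the bytecode.
uintptr_t resume_pc = code.GetBaselineEndPCForBytecodeOffset(offset, bytecodes);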
@@ -414,12 +434,16 @@ inline bool Code::is_interpreter_trampoline_builtin() const {
          index == Builtins::kInterpreterEnterBytecodeDispatch);
}

-inline bool Code::is_baseline_leave_frame_builtin() const {
-  return builtin_index() == Builtins::kBaselineLeaveFrame;
+inline bool Code::is_baseline_trampoline_builtin() const {
+  const int index = builtin_index();
+  return index != Builtins::kNoBuiltinId &&
+         (index == Builtins::kBaselineOutOfLinePrologue ||
+          index == Builtins::kBaselineEnterAtBytecode ||
+          index == Builtins::kBaselineEnterAtNextBytecode);
}

-inline bool Code::is_baseline_prologue_builtin() const {
-  return builtin_index() == Builtins::kBaselineOutOfLinePrologue;
+inline bool Code::is_baseline_leave_frame_builtin() const {
+  return builtin_index() == Builtins::kBaselineLeaveFrame;
}

inline bool Code::checks_optimization_marker() const {
@@ -277,7 +277,7 @@ class Code : public HeapObject {
  inline bool is_interpreter_trampoline_builtin() const;

  // Testers for baseline builtins.
-  inline bool is_baseline_prologue_builtin() const;
+  inline bool is_baseline_trampoline_builtin() const;
  inline bool is_baseline_leave_frame_builtin() const;

  // Tells whether the code checks the optimization marker in the function's
@@ -406,8 +406,12 @@ class Code : public HeapObject {
  static inline void CopyRelocInfoToByteArray(ByteArray dest,
                                              const CodeDesc& desc);

-  inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
-                                                  BytecodeArray bytecodes);
+  inline uintptr_t GetBaselineStartPCForBytecodeOffset(int bytecode_offset,
+                                                       BytecodeArray bytecodes);
+
+  inline uintptr_t GetBaselineEndPCForBytecodeOffset(int bytecode_offset,
+                                                     BytecodeArray bytecodes);

  inline int GetBytecodeOffsetForBaselinePC(Address baseline_pc,
                                            BytecodeArray bytecodes);
@@ -554,6 +558,17 @@ class Code : public HeapObject {
  bool is_promise_rejection() const;
  bool is_exception_caught() const;

  enum BytecodeToPCPosition {
    kPcAtStartOfBytecode,
    // End of bytecode equals the start of the next bytecode.
    // We need it when we deoptimize to the next bytecode (lazy deopt or deopt
    // of non-topmost frame).
    kPcAtEndOfBytecode
  };
  inline uintptr_t GetBaselinePCForBytecodeOffset(int bytecode_offset,
                                                  BytecodeToPCPosition position,
                                                  BytecodeArray bytecodes);

  OBJECT_CONSTRUCTORS(Code, HeapObject);
};
test/mjsunit/regress/regress-crbug-1191886.js (new file, 9 lines)
@@ -0,0 +1,9 @@
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

let arr = [];
for (var i = 0; i < 1000000; i++) {
  arr[i] = [];
}
assertEquals(1000000, i);