[ignition] Fix wide suspends to also return
Wide suspends have a "wide" (or "extra-wide") bytecode at their offset,
rather than the suspend itself, so they were failing the return check.

Bug: chromium:805765
Change-Id: Iabfc2a2167d09eda2f6885d9100287aadcd8fee9
Reviewed-on: https://chromium-review.googlesource.com/887082
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50923}
parent 3249b162bd
commit 830e39abae
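For context on the failure: a generator needs enough locals that its register operands no longer fit in a single byte, at which point Ignition emits the suspend behind a Wide prefix. The sketch below is a hedged illustration mirroring the regression test added at the end of this change (variable names are illustrative; the 256-local count is taken from that test):

  // Build a generator with enough locals that its register operands need a
  // Wide prefix on the suspend bytecode.
  var src = "(function* gen() {";
  for (var i = 0; i < 256; ++i) {
    src += "var v_" + i + " = 0;";
  }
  src += "yield; })";

  var g = eval(src)();
  // The suspend sits behind a Wide prefix, so the trampoline's return check
  // used to read the prefix byte instead of the suspend and failed to return.
  g.next();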
src/bailout-reason.h
@@ -26,6 +26,7 @@ namespace internal {
     "The function_data field should be a BytecodeArray on interpreter entry") \
   V(kInputStringTooLong, "Input string too long")                             \
   V(kInvalidBytecode, "Invalid bytecode")                                     \
+  V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ")             \
   V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                \
     "Invalid ElementsKind for InternalArray or InternalPackedArray")          \
   V(kInvalidHandleScopeLevel, "Invalid HandleScope level")                    \
src/builtins/arm/builtins-arm.cc
@@ -844,10 +844,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -857,11 +860,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ cmp(bytecode, Operand(0x1));
-  __ b(hi, &load_size);
+  __ b(hi, &process_bytecode);
   __ b(eq, &extra_wide);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +872,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
   __ add(bytecode_size_table, bytecode_size_table,
          Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
+  __ jmp(&process_bytecode);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -878,8 +881,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ add(bytecode_size_table, bytecode_size_table,
          Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                                    \
+  __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+  __ b(if_return, eq);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
   __ add(bytecode_offset, bytecode_offset, scratch1);
 }
@@ -1025,19 +1036,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
-#define JUMP_IF_EQUAL(NAME)                                               \
-  __ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ b(&do_return, eq);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, r1, r2);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, r1, r2,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1240,14 +1245,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                          kInterpreterBytecodeOffsetRegister));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, r1, r2);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, r1, r2,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
   __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
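The same change is repeated for each remaining architecture below (arm64, ia32, mips, mips64, x64). As a reading aid, here is a rough JavaScript model of what the renamed helper now does; every identifier and operand size in it is invented for illustration and is not a V8 name, except that Wide = 0 and ExtraWide = 1 follow the STATIC_ASSERTs in the diff:

  // Placeholder bytecode values for the sketch.
  const kWide = 0, kExtraWide = 1;
  const kReturn = 2, kSuspendGenerator = 3, kAdd = 4;
  const RETURN_BYTECODES = [kReturn, kSuspendGenerator];
  // One operand-size table per scale: plain, wide, extra-wide (made-up sizes).
  const sizeTable = [
    { [kReturn]: 1, [kSuspendGenerator]: 3, [kAdd]: 2 },
    { [kReturn]: 1, [kSuspendGenerator]: 5, [kAdd]: 3 },
    { [kReturn]: 1, [kSuspendGenerator]: 9, [kAdd]: 5 },
  ];

  function advanceBytecodeOffsetOrReturn(bytecodeArray, offset) {
    let bytecode = bytecodeArray[offset];
    let scale = 0;
    // A Wide/ExtraWide prefix means the real bytecode is the next byte and
    // operand sizes come from the scaled size table.
    if (bytecode === kWide || bytecode === kExtraWide) {
      scale = bytecode === kWide ? 1 : 2;
      offset += 1;
      bytecode = bytecodeArray[offset];
    }
    // The fix: the return check runs on the unprefixed bytecode, so a wide
    // suspend/return is caught here instead of falling through.
    if (RETURN_BYTECODES.includes(bytecode)) return { shouldReturn: true, offset };
    // Otherwise advance past the current bytecode using the scaled size.
    return { shouldReturn: false, offset: offset + sizeTable[scale][bytecode] };
  }

  // A wide suspend: the old code compared the prefix (kWide == 0) against the
  // return list and missed it; the helper now reports it as a return.
  console.log(advanceBytecodeOffsetOrReturn([kWide, kSuspendGenerator], 0));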
src/builtins/arm64/builtins-arm64.cc
@@ -936,10 +936,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -949,11 +952,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ Cmp(bytecode, Operand(0x1));
-  __ B(hi, &load_size);
+  __ B(hi, &process_bytecode);
   __ B(eq, &extra_wide);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -961,7 +964,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
   __ Add(bytecode_size_table, bytecode_size_table,
          Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ B(&load_size);
+  __ B(&process_bytecode);
 
   __ Bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -970,8 +973,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Add(bytecode_size_table, bytecode_size_table,
          Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ Bind(&load_size);
+  __ Bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                               \
+  __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+  __ B(if_return, eq);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
   __ Add(bytecode_offset, bytecode_offset, scratch1);
 }
@@ -1118,19 +1129,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
-#define JUMP_IF_EQUAL(NAME)                                               \
-  __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ B(&do_return, eq);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, x1, x2);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, x1, x2,
+                                &do_return);
   __ B(&do_dispatch);
 
   __ bind(&do_return);
@@ -1359,14 +1364,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                          kInterpreterBytecodeOffsetRegister));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, x1, x2);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, x1, x2,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
   __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
src/builtins/ia32/builtins-ia32.cc
@@ -767,10 +767,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -780,11 +783,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ cmpb(bytecode, Immediate(0x1));
-  __ j(above, &load_size, Label::kNear);
+  __ j(above, &process_bytecode, Label::kNear);
   __ j(equal, &extra_wide, Label::kNear);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -792,7 +795,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
   __ add(bytecode_size_table,
          Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size, Label::kNear);
+  __ jmp(&process_bytecode, Label::kNear);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -801,8 +804,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ add(bytecode_size_table,
          Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                              \
+  __ cmpb(bytecode,                                                     \
+          Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+  __ j(equal, if_return, Label::kNear);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
 }
 
@@ -946,19 +958,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-#define JUMP_IF_EQUAL(NAME)                                                   \
-  __ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ j(equal, &do_return, Label::kNear);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, ebx, edx);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, ebx, edx,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1283,8 +1289,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, ebx, edx);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, ebx, edx,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ mov(ebx, kInterpreterBytecodeOffsetRegister);
@@ -1292,6 +1300,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
src/builtins/mips/builtins-mips.cc
@@ -821,10 +821,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Register scratch2, Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
-  __ Branch(&load_size, hi, bytecode, Operand(1));
+  __ Branch(&process_bytecode, hi, bytecode, Operand(1));
   __ Branch(&extra_wide, eq, bytecode, Operand(1));
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ lbu(bytecode, MemOperand(scratch2));
   __ Addu(bytecode_size_table, bytecode_size_table,
           Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
+  __ jmp(&process_bytecode);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Addu(bytecode_size_table, bytecode_size_table,
           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                          \
+  __ Branch(if_return, eq, bytecode,                                 \
+            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ Lsa(scratch2, bytecode_size_table, bytecode, 2);
   __ lw(scratch2, MemOperand(scratch2));
   __ Addu(bytecode_offset, bytecode_offset, scratch2);
@@ -1007,21 +1018,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ lw(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
-
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ Addu(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
   __ lbu(a1, MemOperand(a1));
-#define JUMP_IF_EQUAL(NAME)                                          \
-  __ Branch(&do_return, eq, a1,                                      \
-            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1244,14 +1248,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   __ lbu(a1, MemOperand(a1));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
   __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
src/builtins/mips64/builtins-mips64.cc
@@ -822,10 +822,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Register scratch2, Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
-  __ Branch(&load_size, hi, bytecode, Operand(1));
+  __ Branch(&process_bytecode, hi, bytecode, Operand(1));
   __ Branch(&extra_wide, eq, bytecode, Operand(1));
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Lbu(bytecode, MemOperand(scratch2));
   __ Daddu(bytecode_size_table, bytecode_size_table,
            Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
+  __ jmp(&process_bytecode);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Daddu(bytecode_size_table, bytecode_size_table,
            Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                          \
+  __ Branch(if_return, eq, bytecode,                                 \
+            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
   __ Lw(scratch2, MemOperand(scratch2));
   __ Daddu(bytecode_offset, bytecode_offset, scratch2);
@@ -1008,20 +1019,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ Daddu(a1, kInterpreterBytecodeArrayRegister,
            kInterpreterBytecodeOffsetRegister);
   __ Lbu(a1, MemOperand(a1));
-#define JUMP_IF_EQUAL(NAME)                                          \
-  __ Branch(&do_return, eq, a1,                                      \
-            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1244,14 +1249,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   __ Lbu(a1, MemOperand(a1));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
   __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
src/builtins/x64/builtins-x64.cc
@@ -836,10 +836,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -848,11 +851,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          ExternalReference::bytecode_size_table_address(masm->isolate()));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ cmpb(bytecode, Immediate(0x1));
-  __ j(above, &load_size, Label::kNear);
+  __ j(above, &process_bytecode, Label::kNear);
   __ j(equal, &extra_wide, Label::kNear);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -860,7 +863,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
   __ addp(bytecode_size_table,
           Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size, Label::kNear);
+  __ jmp(&process_bytecode, Label::kNear);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -869,8 +872,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ addp(bytecode_size_table,
           Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                              \
+  __ cmpb(bytecode,                                                     \
+          Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+  __ j(equal, if_return, Label::kNear);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
 }
 
@@ -1018,19 +1030,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
                     kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-#define JUMP_IF_EQUAL(NAME)                                                   \
-  __ cmpb(rbx, Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ j(equal, &do_return, Label::kNear);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, rbx, rcx);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, rbx, rcx,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1258,14 +1264,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, rbx, rcx);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, rbx, rcx,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
   __ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
test/mjsunit/regress/regress-crbug-805765.js (new file)
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var code = "(function* gen() {"
+for (var i = 0; i < 256; ++i) {
+  code += `var v_${i} = 0;`
+}
+code += `yield; })`
+
+var gen = eval(code);
+var g = gen();
+g.next();