[ignition] Fix wide suspends to also return
Wide suspends have a "wide" (or "extra-wide") bytecode at their offset, rather
than the suspend itself, so they were failing the return check.

Bug: chromium:805765
Change-Id: Iabfc2a2167d09eda2f6885d9100287aadcd8fee9
Reviewed-on: https://chromium-review.googlesource.com/887082
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Commit-Queue: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#50923}
commit 830e39abae (parent 3249b162bd)
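The same change is ported across all six architecture builtins below (arm, arm64, ia32, mips, mips64, x64): the per-byte return check in the entry trampoline moves into the renamed helper AdvanceBytecodeOffsetOrReturn, which first strips a Wide/ExtraWide prefix and only then tests for a return bytecode. As a minimal C++ sketch of that control flow, using plain data structures rather than V8's MacroAssembler (the RETURN_BYTECODE_LIST check is collapsed to one hypothetical kReturn opcode, and kBytecodeSizes stands in for the three scaled size tables):

#include <cstdint>

// Prefix opcodes; the values mirror the STATIC_ASSERTs in the diffs below.
constexpr uint8_t kWide = 0;
constexpr uint8_t kExtraWide = 1;
// Hypothetical stand-in for RETURN_BYTECODE_LIST (kReturn, the suspend
// bytecodes, ...); the real check is macro-generated, one compare per entry.
constexpr uint8_t kReturn = 2;

// [scale][opcode] -> instruction size in bytes; scale 0/1/2 selects the
// single / wide / extra-wide operand size table, as in the builtins.
extern const int kBytecodeSizes[3][256];

// Returns true when the caller should branch to its if_return label: the
// bytecode at *offset, once any prefix is stripped, is a return bytecode.
// Otherwise advances *offset past the current (possibly prefixed) bytecode.
bool AdvanceBytecodeOffsetOrReturn(const uint8_t* bytecode_array, int* offset) {
  uint8_t bytecode = bytecode_array[*offset];
  int scale = 0;
  if (bytecode == kWide || bytecode == kExtraWide) {
    scale = bytecode + 1;                  // switch to the scaled size table
    bytecode = bytecode_array[++*offset];  // load the bytecode behind the prefix
  }
  // The old code ran this test on the raw byte, i.e. on the prefix itself for
  // wide suspends, which is the bug this commit fixes.
  if (bytecode == kReturn) return true;
  *offset += kBytecodeSizes[scale][bytecode];
  return false;
}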
@@ -26,6 +26,7 @@ namespace internal {
       "The function_data field should be a BytecodeArray on interpreter entry") \
   V(kInputStringTooLong, "Input string too long")                               \
   V(kInvalidBytecode, "Invalid bytecode")                                       \
+  V(kInvalidBytecodeAdvance, "Cannot advance current bytecode, ")               \
   V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                  \
     "Invalid ElementsKind for InternalArray or InternalPackedArray")            \
   V(kInvalidHandleScopeLevel, "Invalid HandleScope level")                      \
@@ -844,10 +844,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -857,11 +860,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
       Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ cmp(bytecode, Operand(0x1));
-  __ b(hi, &load_size);
+  __ b(hi, &process_bytecode);
   __ b(eq, &extra_wide);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +872,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
   __ add(bytecode_size_table, bytecode_size_table,
          Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
+  __ jmp(&process_bytecode);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -878,8 +881,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ add(bytecode_size_table, bytecode_size_table,
          Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                                    \
+  __ cmp(bytecode, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+  __ b(if_return, eq);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ ldr(scratch1, MemOperand(bytecode_size_table, bytecode, LSL, 2));
   __ add(bytecode_offset, bytecode_offset, scratch1);
 }
@@ -1025,19 +1036,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ ldrb(r1, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
-#define JUMP_IF_EQUAL(NAME)                                                    \
-  __ cmp(r1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));       \
-  __ b(&do_return, eq);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, r1, r2);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, r1, r2,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1240,14 +1245,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                          kInterpreterBytecodeOffsetRegister));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, r1, r2);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, r1, r2,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(r2, kInterpreterBytecodeOffsetRegister);
   __ str(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -936,10 +936,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -949,11 +952,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
       Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ Cmp(bytecode, Operand(0x1));
-  __ B(hi, &load_size);
+  __ B(hi, &process_bytecode);
   __ B(eq, &extra_wide);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -961,7 +964,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Ldrb(bytecode, MemOperand(bytecode_array, bytecode_offset));
   __ Add(bytecode_size_table, bytecode_size_table,
          Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ B(&load_size);
+  __ B(&process_bytecode);
 
   __ Bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -970,8 +973,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Add(bytecode_size_table, bytecode_size_table,
          Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ Bind(&load_size);
+  __ Bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                              \
+  __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
+  __ B(if_return, eq);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ Ldr(scratch1.W(), MemOperand(bytecode_size_table, bytecode, LSL, 2));
   __ Add(bytecode_offset, bytecode_offset, scratch1);
 }
@@ -1118,19 +1129,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ Ldrb(x1, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
-#define JUMP_IF_EQUAL(NAME)                                              \
-  __ Cmp(x1, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ B(&do_return, eq);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, x1, x2);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, x1, x2,
+                                &do_return);
   __ B(&do_dispatch);
 
   __ bind(&do_return);
@@ -1359,14 +1364,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                          kInterpreterBytecodeOffsetRegister));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, x1, x2);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, x1, x2,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(x2, kInterpreterBytecodeOffsetRegister);
   __ Str(x2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -767,10 +767,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -780,11 +783,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ cmpb(bytecode, Immediate(0x1));
-  __ j(above, &load_size, Label::kNear);
+  __ j(above, &process_bytecode, Label::kNear);
   __ j(equal, &extra_wide, Label::kNear);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -792,7 +795,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ movzx_b(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
   __ add(bytecode_size_table,
          Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size, Label::kNear);
+  __ jmp(&process_bytecode, Label::kNear);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -801,8 +804,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ add(bytecode_size_table,
          Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                              \
+  __ cmpb(bytecode,                                                      \
+          Immediate(static_cast<int>(interpreter::Bytecode::k##NAME)));  \
+  __ j(equal, if_return, Label::kNear);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ add(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
 }
 
@@ -946,19 +958,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
          Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ movzx_b(ebx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-#define JUMP_IF_EQUAL(NAME)                                                   \
-  __ cmpb(ebx, Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ j(equal, &do_return, Label::kNear);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, ebx, edx);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, ebx, edx,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1283,8 +1289,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, ebx, edx);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, ebx, edx,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ mov(ebx, kInterpreterBytecodeOffsetRegister);
@@ -1292,6 +1300,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   __ mov(Operand(ebp, InterpreterFrameConstants::kBytecodeOffsetFromFp), ebx);
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -821,10 +821,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Register scratch2, Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
      Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
-  __ Branch(&load_size, hi, bytecode, Operand(1));
+  __ Branch(&process_bytecode, hi, bytecode, Operand(1));
   __ Branch(&extra_wide, eq, bytecode, Operand(1));
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ lbu(bytecode, MemOperand(scratch2));
   __ Addu(bytecode_size_table, bytecode_size_table,
           Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
+  __ jmp(&process_bytecode);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Addu(bytecode_size_table, bytecode_size_table,
           Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)          \
+  __ Branch(if_return, eq, bytecode, \
+            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ Lsa(scratch2, bytecode_size_table, bytecode, 2);
   __ lw(scratch2, MemOperand(scratch2));
   __ Addu(bytecode_offset, bytecode_offset, scratch2);
@@ -1007,21 +1018,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ lw(kInterpreterBytecodeOffsetRegister,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ Addu(a1, kInterpreterBytecodeArrayRegister,
           kInterpreterBytecodeOffsetRegister);
   __ lbu(a1, MemOperand(a1));
-#define JUMP_IF_EQUAL(NAME)       \
-  __ Branch(&do_return, eq, a1,   \
-            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1244,14 +1248,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   __ lbu(a1, MemOperand(a1));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
   __ sw(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -822,10 +822,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1, Register scratch2) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Register scratch2, Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -834,10 +837,10 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
      Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
-  __ Branch(&load_size, hi, bytecode, Operand(1));
+  __ Branch(&process_bytecode, hi, bytecode, Operand(1));
   __ Branch(&extra_wide, eq, bytecode, Operand(1));
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -846,7 +849,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Lbu(bytecode, MemOperand(scratch2));
   __ Daddu(bytecode_size_table, bytecode_size_table,
            Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size);
+  __ jmp(&process_bytecode);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -856,8 +859,16 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ Daddu(bytecode_size_table, bytecode_size_table,
            Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)          \
+  __ Branch(if_return, eq, bytecode, \
+            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ Dlsa(scratch2, bytecode_size_table, bytecode, 2);
   __ Lw(scratch2, MemOperand(scratch2));
   __ Daddu(bytecode_offset, bytecode_offset, scratch2);
@@ -1008,20 +1019,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ Daddu(a1, kInterpreterBytecodeArrayRegister,
            kInterpreterBytecodeOffsetRegister);
   __ Lbu(a1, MemOperand(a1));
-#define JUMP_IF_EQUAL(NAME)       \
-  __ Branch(&do_return, eq, a1,   \
-            Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1244,14 +1249,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
   __ Lbu(a1, MemOperand(a1));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, a1, a2, a3);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
   __ Sd(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
@@ -836,10 +836,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
 }
 
 // Advance the current bytecode offset. This simulates what all bytecode
-// handlers do upon completion of the underlying operation.
-static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
-                                  Register bytecode_offset, Register bytecode,
-                                  Register scratch1) {
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+                                          Register bytecode_array,
+                                          Register bytecode_offset,
+                                          Register bytecode, Register scratch1,
+                                          Label* if_return) {
   Register bytecode_size_table = scratch1;
   DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
                      bytecode));
@@ -848,11 +851,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
          ExternalReference::bytecode_size_table_address(masm->isolate()));
 
   // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
-  Label load_size, extra_wide;
+  Label process_bytecode, extra_wide;
   STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
   STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
   __ cmpb(bytecode, Immediate(0x1));
-  __ j(above, &load_size, Label::kNear);
+  __ j(above, &process_bytecode, Label::kNear);
   __ j(equal, &extra_wide, Label::kNear);
 
   // Load the next bytecode and update table to the wide scaled table.
@@ -860,7 +863,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ movzxbp(bytecode, Operand(bytecode_array, bytecode_offset, times_1, 0));
   __ addp(bytecode_size_table,
           Immediate(kIntSize * interpreter::Bytecodes::kBytecodeCount));
-  __ jmp(&load_size, Label::kNear);
+  __ jmp(&process_bytecode, Label::kNear);
 
   __ bind(&extra_wide);
   // Load the next bytecode and update table to the extra wide scaled table.
@@ -869,8 +872,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
   __ addp(bytecode_size_table,
           Immediate(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
 
-  // Load the size of the current bytecode.
-  __ bind(&load_size);
+  __ bind(&process_bytecode);
+
+  // Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME)                                              \
+  __ cmpb(bytecode,                                                      \
+          Immediate(static_cast<int>(interpreter::Bytecode::k##NAME)));  \
+  __ j(equal, if_return, Label::kNear);
+  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+  // Otherwise, load the size of the current bytecode and advance the offset.
   __ addl(bytecode_offset, Operand(bytecode_size_table, bytecode, times_4, 0));
 }
 
@@ -1018,19 +1030,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ SmiToInteger32(kInterpreterBytecodeOffsetRegister,
                     kInterpreterBytecodeOffsetRegister);
 
-  // Check if we should return by testing for one of the returning bytecodes.
+  // Either return, or advance to the next bytecode and dispatch.
   Label do_return;
   __ movzxbp(rbx, Operand(kInterpreterBytecodeArrayRegister,
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
-#define JUMP_IF_EQUAL(NAME)                                                   \
-  __ cmpb(rbx, Immediate(static_cast<int>(interpreter::Bytecode::k##NAME))); \
-  __ j(equal, &do_return, Label::kNear);
-  RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
-#undef JUMP_IF_EQUAL
-
-  // Advance to the next bytecode and dispatch.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, rbx, rcx);
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, rbx, rcx,
+                                &do_return);
   __ jmp(&do_dispatch);
 
   __ bind(&do_return);
@@ -1258,14 +1264,20 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                           kInterpreterBytecodeOffsetRegister, times_1, 0));
 
   // Advance to the next bytecode.
-  AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
-                        kInterpreterBytecodeOffsetRegister, rbx, rcx);
+  Label if_return;
+  AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+                                kInterpreterBytecodeOffsetRegister, rbx, rcx,
+                                &if_return);
 
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ Integer32ToSmi(rbx, kInterpreterBytecodeOffsetRegister);
   __ movp(Operand(rbp, InterpreterFrameConstants::kBytecodeOffsetFromFp), rbx);
 
   Generate_InterpreterEnterBytecode(masm);
+
+  // We should never take the if_return path.
+  __ bind(&if_return);
+  __ Abort(AbortReason::kInvalidBytecodeAdvance);
 }
 
 void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {
test/mjsunit/regress/regress-crbug-805765.js (new file, 13 lines)
@@ -0,0 +1,13 @@
+// Copyright 2018 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var code = "(function* gen() {"
+for (var i = 0; i < 256; ++i) {
+  code += `var v_${i} = 0;`
+}
+code += `yield; })`
+
+var gen = eval(code);
+var g = gen();
+g.next();
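The regression test works by sheer operand pressure: 256 locals push the generator's register operands past what a single-byte operand can encode, so the suspend bytecode at the yield is emitted behind a Wide prefix, exactly the shape that used to slip past the return check, and g.next() then executes that wide suspend. Running the file under a d8 shell with the standard --print-bytecode flag should make the Wide-prefixed suspend visible.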