PPC/s390: [ignition] Fix wide suspends to also return

Port 830e39abae

Original Commit Message:

    Wide suspends have a "wide" (or "extra-wide") bytecode at their offset,
    rather than the suspend itself, so they were failing the return check.

R=leszeks@chromium.org, joransiu@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N

Change-Id: I0c95b9fd34df7232ae07fd1e508f40cd139e9734
Reviewed-on: https://chromium-review.googlesource.com/894303
Reviewed-by: Joran Siu <joransiu@ca.ibm.com>
Commit-Queue: Junliang Yan <jyan@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#50971}
This commit is contained in:
Junliang Yan 2018-01-30 16:19:02 -05:00 committed by Commit Bot
parent ba30988cbc
commit 2de48de771
2 changed files with 66 additions and 40 deletions

View File

@@ -844,10 +844,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register bytecode,
Register scratch1) {
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -857,11 +860,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ cmpi(bytecode, Operand(0x1));
__ bgt(&load_size);
__ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -869,7 +872,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ lbzx(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ addi(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ b(&load_size);
__ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -879,7 +882,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
__ bind(&load_size);
__ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ cmpi(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
__ beq(if_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
// Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftImm(scratch2, bytecode, Operand(2));
__ lwzx(scratch2, MemOperand(bytecode_size_table, scratch2));
__ add(bytecode_offset, bytecode_offset, scratch2);
@@ -1039,19 +1052,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return by testing for one of the returning bytecodes.
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
#define JUMP_IF_EQUAL(NAME) \
__ cmpi(r4, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
__ beq(&do_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5);
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5,
&do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1274,8 +1281,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5);
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r4, r5,
&if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
@@ -1283,6 +1292,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {

View File

@@ -846,10 +846,13 @@ static void MaybeTailCallOptimizedCodeSlot(MacroAssembler* masm,
}
// Advance the current bytecode offset. This simulates what all bytecode
// handlers do upon completion of the underlying operation.
static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Register bytecode_offset, Register bytecode,
Register scratch1) {
// handlers do upon completion of the underlying operation. Will bail out to a
// label if the bytecode (without prefix) is a return bytecode.
static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
Register bytecode_array,
Register bytecode_offset,
Register bytecode, Register scratch1,
Label* if_return) {
Register bytecode_size_table = scratch1;
Register scratch2 = bytecode;
DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode_size_table,
@@ -859,11 +862,11 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(ExternalReference::bytecode_size_table_address(masm->isolate())));
// Check if the bytecode is a Wide or ExtraWide prefix bytecode.
Label load_size, extra_wide;
Label process_bytecode, extra_wide;
STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
__ CmpP(bytecode, Operand(0x1));
__ bgt(&load_size);
__ bgt(&process_bytecode);
__ beq(&extra_wide);
// Load the next bytecode and update table to the wide scaled table.
@@ -871,7 +874,7 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
__ LoadlB(bytecode, MemOperand(bytecode_array, bytecode_offset));
__ AddP(bytecode_size_table, bytecode_size_table,
Operand(kIntSize * interpreter::Bytecodes::kBytecodeCount));
__ b(&load_size);
__ b(&process_bytecode);
__ bind(&extra_wide);
// Load the next bytecode and update table to the extra wide scaled table.
@@ -881,7 +884,17 @@ static void AdvanceBytecodeOffset(MacroAssembler* masm, Register bytecode_array,
Operand(2 * kIntSize * interpreter::Bytecodes::kBytecodeCount));
// Load the size of the current bytecode.
__ bind(&load_size);
__ bind(&process_bytecode);
// Bailout to the return label if this is a return bytecode.
#define JUMP_IF_EQUAL(NAME) \
__ CmpP(bytecode, \
Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
__ beq(if_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
// Otherwise, load the size of the current bytecode and advance the offset.
__ ShiftLeftP(scratch2, bytecode, Operand(2));
__ LoadlW(scratch2, MemOperand(bytecode_size_table, scratch2));
__ AddP(bytecode_offset, bytecode_offset, scratch2);
@@ -1038,19 +1051,13 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
__ SmiUntag(kInterpreterBytecodeOffsetRegister);
// Check if we should return by testing for one of the returning bytecodes.
// Either return, or advance to the next bytecode and dispatch.
Label do_return;
__ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister));
#define JUMP_IF_EQUAL(NAME) \
__ CmpP(r3, Operand(static_cast<int>(interpreter::Bytecode::k##NAME))); \
__ beq(&do_return);
RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
#undef JUMP_IF_EQUAL
// Advance to the next bytecode and dispatch.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r3, r4);
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r3, r4,
&do_return);
__ b(&do_dispatch);
__ bind(&do_return);
@@ -1271,8 +1278,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
kInterpreterBytecodeOffsetRegister));
// Advance to the next bytecode.
AdvanceBytecodeOffset(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r3, r4);
Label if_return;
AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
kInterpreterBytecodeOffsetRegister, r3, r4,
&if_return);
// Convert new bytecode offset to a Smi and save in the stackframe.
__ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
@@ -1280,6 +1289,10 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
Generate_InterpreterEnterBytecode(masm);
// We should never take the if_return path.
__ bind(&if_return);
__ Abort(AbortReason::kInvalidBytecodeAdvance);
}
void Builtins::Generate_InterpreterEnterBytecodeDispatch(MacroAssembler* masm) {