PPC/s390: [interpreter] Make FunctionEntry StackCheck bytecodes implicit
Port 9d3dc6f219
Original Commit Message:
FunctionEntry StackChecks are one of the two cases where we generate a
StackCheck bytecode. In these cases, we do a stack check against the js
limit (not to be confused with the real js limit). Their purpose is to
make it possible to interrupt the running code.
We can omit the FunctionEntry StackCheck by embedding its code into
the InterpreterEntryTrampoline builtin. We save one bytecode per
interpreted function.
This change has ripple effects on optimized code, as well as the
deoptimizer.
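For context: V8 keeps two stack limits per isolate. The real js limit marks the
actual end of the usable stack, while the js limit normally equals it but is
raised to a sentinel value whenever an interrupt is requested, so a compare
against the js limit doubles as an interrupt poll. A minimal C++ model of the
scheme (names are illustrative, not V8's actual API):

  #include <atomic>
  #include <cstdint>

  // Illustrative model of the two-limit scheme (stacks grow downwards).
  struct StackGuardModel {
    uintptr_t real_jslimit;          // true end of the usable stack
    std::atomic<uintptr_t> jslimit;  // == real_jslimit unless an interrupt is pending

    void RequestInterrupt() {
      // Raising jslimit to the maximum address makes every sp-vs-jslimit
      // check fail, so running code traps into the runtime at its next poll.
      jslimit.store(UINTPTR_MAX, std::memory_order_relaxed);
    }
    bool RealCheckOk(uintptr_t sp) const { return sp >= real_jslimit; }
    bool InterruptCheckOk(uintptr_t sp) const { return sp >= jslimit; }
  };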
R=solanes@chromium.org, joransiu@ca.ibm.com, jyan@ca.ibm.com, michael_dawson@ca.ibm.com
BUG=
LOG=N
Change-Id: If797a8acba7581c9c388ac09b5554c774c5993a2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2048124
Reviewed-by: Junliang Yan <jyan@ca.ibm.com>
Commit-Queue: Milad Farazmand <miladfar@ca.ibm.com>
Cr-Commit-Position: refs/heads/master@{#66210}
Parent: ffdece642c
Commit: 9987977efe
PPC:

@@ -59,10 +59,16 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
-void LoadRealStackLimit(MacroAssembler* masm, Register destination) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+void LoadStackLimit(MacroAssembler* masm, Register destination,
+                    StackLimitKind kind) {
   DCHECK(masm->root_array_available());
   Isolate* isolate = masm->isolate();
-  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  ExternalReference limit =
+      kind == StackLimitKind::kRealStackLimit
+          ? ExternalReference::address_of_real_jslimit(isolate)
+          : ExternalReference::address_of_jslimit(isolate);
   DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
 
   intptr_t offset =
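The tail of LoadStackLimit is elided by the diff; given the DCHECK above, it
presumably converts the external reference into a root-register-relative load,
along these lines (a sketch, not the verbatim source):

  intptr_t offset =
      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
  CHECK(is_int32(offset));
  __ LoadP(destination, MemOperand(kRootRegister, offset), r0);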
@@ -76,7 +82,7 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  LoadRealStackLimit(masm, scratch);
+  LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ sub(scratch, sp, scratch);
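The overflow check is plain pointer arithmetic: free space is sp minus the real
limit, and overflow is flagged when the arguments about to be pushed would not
fit. A conceptual C++ equivalent (illustrative, not the builtin itself):

  #include <cstdint>

  // Conceptual equivalent of Generate_StackOverflowCheck.
  bool WouldOverflow(uintptr_t sp, uintptr_t real_limit, size_t num_args,
                     size_t pointer_size) {
    // The subtraction may wrap if the stack has already overflowed, which is
    // why the builtin relies on a signed comparison afterwards.
    intptr_t space_left = static_cast<intptr_t>(sp - real_limit);
    return space_left < static_cast<intptr_t>(num_args * pointer_size);
  }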
@@ -425,7 +431,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  LoadRealStackLimit(masm, scratch);
+  LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
   __ cmpl(sp, scratch);
   __ blt(&stack_overflow);
 
@@ -718,7 +724,7 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
-  LoadRealStackLimit(masm, scratch1);
+  LoadStackLimit(masm, scratch1, StackLimitKind::kRealStackLimit);
   // Make scratch1 the space we have left. The stack might already be overflowed
   // here which will cause scratch1 to become negative.
   __ sub(scratch1, sp, scratch1);
@@ -1119,7 +1125,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
 
   // Do a stack check to ensure we don't go over the limit.
   __ sub(r8, sp, r5);
-  LoadRealStackLimit(masm, r0);
+  LoadStackLimit(masm, r0, StackLimitKind::kRealStackLimit);
   __ cmpl(r8, r0);
   __ blt(&stack_overflow);
 
@@ -1149,6 +1155,14 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ StorePX(r6, MemOperand(fp, r8));
   __ bind(&no_incoming_new_target_or_generator_register);
 
+  // Perform interrupt stack check.
+  // TODO(solanes): Merge with the real stack limit check above.
+  Label stack_check_interrupt, after_stack_check_interrupt;
+  LoadStackLimit(masm, r6, StackLimitKind::kInterruptStackLimit);
+  __ cmpl(sp, r6);
+  __ blt(&stack_check_interrupt);
+  __ bind(&after_stack_check_interrupt);
+
   // The accumulator is already loaded with undefined.
 
   // Load the dispatch table into a register and dispatch to the bytecode
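This block is the heart of the change: the interrupt poll that used to be the
function's first StackCheck bytecode now runs once here in the trampoline. The
trampoline therefore performs two distinct checks, roughly as follows
(hypothetical names, summarizing the assembly above):

  #include <cstdint>

  enum class EntryAction { kStackOverflow, kStackGuard, kDispatch };

  EntryAction ClassifyEntry(uintptr_t sp, uintptr_t frame_size,
                            uintptr_t real_jslimit, uintptr_t jslimit) {
    // Real-limit check: can the register file fit on the stack at all?
    if (sp - frame_size < real_jslimit) return EntryAction::kStackOverflow;
    // Interrupt poll against the (possibly raised) jslimit; this replaces
    // the explicit FunctionEntry StackCheck bytecode.
    if (sp < jslimit) return EntryAction::kStackGuard;
    return EntryAction::kDispatch;
  }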
@@ -1191,6 +1205,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   LeaveInterpreterFrame(masm, r5);
   __ blr();
 
+  __ bind(&stack_check_interrupt);
+  // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+  // for the call to the StackGuard.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                              kFunctionEntryBytecodeOffset)));
+  __ StoreP(kInterpreterBytecodeOffsetRegister,
+            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ CallRuntime(Runtime::kStackGuard);
+
+  // After the call, restore the bytecode array, bytecode offset and accumulator
+  // registers again. Also, restore the bytecode offset in the stack to its
+  // previous value.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+  __ SmiTag(r6, kInterpreterBytecodeOffsetRegister);
+  __ StoreP(r6,
+            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  __ jmp(&after_stack_check_interrupt);
+
   __ bind(&optimized_code_slot_not_empty);
   Label maybe_has_optimized_code;
   // Check if optimized code marker is actually a weak reference to the
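Before calling the StackGuard, the builtin stores a sentinel bytecode offset so
that a deopt triggered during the call is recognizable later. In the upstream
change this ports, kFunctionEntryBytecodeOffset is -1, i.e. one slot before the
first real bytecode. A worked sketch of the stored marker (the header size value
is assumed for illustration):

  #include <cassert>

  constexpr int kHeapObjectTag = 1;                 // V8's heap-pointer tag
  constexpr int kBytecodeArrayHeaderSize = 16;      // assumed, for illustration
  constexpr int kFunctionEntryBytecodeOffset = -1;  // sentinel offset

  int main() {
    int first_real = kBytecodeArrayHeaderSize - kHeapObjectTag;
    int marker = first_real + kFunctionEntryBytecodeOffset;
    // The sentinel sits just below the first valid offset, so it can never
    // collide with a real bytecode offset.
    assert(marker == first_real - 1);
    return 0;
  }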
@@ -1395,6 +1434,16 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
+  if (FLAG_debug_code) {
+    Label okay;
+    __ cmpi(kInterpreterBytecodeOffsetRegister,
+            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                    kFunctionEntryBytecodeOffset));
+    __ bge(&okay);
+    __ bkpt(0);
+    __ bind(&okay);
+  }
+
   // Dispatch to the target bytecode.
   UseScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
@@ -1414,6 +1463,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
+  Label enter_bytecode, function_entry_bytecode;
+  __ cmpi(kInterpreterBytecodeOffsetRegister,
+          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                  kFunctionEntryBytecodeOffset));
+  __ beq(&function_entry_bytecode);
+
   // Load the current bytecode.
   __ lbzx(r4, MemOperand(kInterpreterBytecodeArrayRegister,
                          kInterpreterBytecodeOffsetRegister));
@@ -1424,6 +1479,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                                 kInterpreterBytecodeOffsetRegister, r4, r5,
                                 &if_return);
 
+  __ bind(&enter_bytecode);
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
   __ StoreP(r5,
@@ -1431,6 +1487,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
 
   Generate_InterpreterEnterBytecode(masm);
 
+  __ bind(&function_entry_bytecode);
+  // If the code deoptimizes during the implicit function entry stack interrupt
+  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+  // not a valid bytecode offset. Detect this case and advance to the first
+  // actual bytecode.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ b(&enter_bytecode);
+
   // We should never take the if_return path.
   __ bind(&if_return);
   __ Abort(AbortReason::kInvalidBytecodeAdvance);
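This is the consumer of the sentinel: if the deoptimizer resumes with a bailout
ID of kFunctionEntryBytecodeOffset, no bytecode has executed yet, so "advancing"
simply means entering the first real bytecode. Conceptually (illustrative, not
the builtin itself):

  // Conceptual behaviour of Generate_InterpreterEnterBytecodeAdvance.
  int AdvanceOffset(int offset, int first_real, int current_bytecode_size) {
    if (offset == first_real - 1) {
      // Function-entry sentinel: resume at the first real bytecode.
      return first_real;
    }
    // Normal case: step over the bytecode that just completed.
    return offset + current_bytecode_size;
  }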
@@ -2084,7 +2149,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
     {
       UseScratchRegisterScope temps(masm);
       Register scratch = temps.Acquire();
-      LoadRealStackLimit(masm, scratch);
+      LoadStackLimit(masm, scratch, StackLimitKind::kRealStackLimit);
       __ cmpl(sp, scratch);
     }
     __ bgt(&done);  // Signed comparison.
 
s390:

@@ -59,10 +59,15 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
 
 namespace {
 
-MemOperand RealStackLimitAsMemOperand(MacroAssembler* masm) {
+enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+
+MemOperand StackLimitAsMemOperand(MacroAssembler* masm, StackLimitKind kind) {
   DCHECK(masm->root_array_available());
   Isolate* isolate = masm->isolate();
-  ExternalReference limit = ExternalReference::address_of_real_jslimit(isolate);
+  ExternalReference limit =
+      kind == StackLimitKind::kRealStackLimit
+          ? ExternalReference::address_of_real_jslimit(isolate)
+          : ExternalReference::address_of_jslimit(isolate);
   DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
 
   intptr_t offset =
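Design note: unlike the PPC port, which loads the limit into a register, the
s390 helper returns a MemOperand, because z/Architecture compare instructions
such as CmpLogicalP can take a memory operand directly and skip the
intermediate load. The elided tail presumably mirrors the PPC helper (a
sketch):

  intptr_t offset =
      TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
  CHECK(is_int32(offset));
  return MemOperand(kRootRegister, offset);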
@@ -76,7 +81,8 @@ void Generate_StackOverflowCheck(MacroAssembler* masm, Register num_args,
   // Check the stack for overflow. We are not trying to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
-  __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+  __ LoadP(scratch,
+           StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
   // Make scratch the space we have left. The stack might already be overflowed
   // here which will cause scratch to become negative.
   __ SubP(scratch, sp, scratch);
@@ -417,7 +423,8 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
   // Check the stack for overflow. We are not trying to catch interruptions
   // (i.e. debug break and preemption) here, so check the "real stack limit".
   Label stack_overflow;
-  __ LoadP(scratch, RealStackLimitAsMemOperand(masm));
+  __ LoadP(scratch,
+           StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
   __ CmpLogicalP(sp, scratch);
   __ blt(&stack_overflow);
 
@@ -762,7 +769,8 @@ static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
   // interruptions (e.g. debug break and preemption) here, so the "real stack
   // limit" is checked.
   Label okay;
-  __ LoadP(scratch1, RealStackLimitAsMemOperand(masm));
+  __ LoadP(scratch1,
+           StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
   // Make scratch1 the space we have left. The stack might already be overflowed
   // here which will cause scratch1 to become negative.
   __ SubP(scratch1, sp, scratch1);
@@ -1173,7 +1181,8 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
 
   // Do a stack check to ensure we don't go over the limit.
   __ SubP(r8, sp, r4);
-  __ CmpLogicalP(r8, RealStackLimitAsMemOperand(masm));
+  __ CmpLogicalP(
+      r8, StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
   __ blt(&stack_overflow);
 
   // If ok, push undefined as the initial value for all register file entries.
@@ -1203,6 +1212,15 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   __ StoreP(r5, MemOperand(fp, r8));
   __ bind(&no_incoming_new_target_or_generator_register);
 
+  // Perform interrupt stack check.
+  // TODO(solanes): Merge with the real stack limit check above.
+  Label stack_check_interrupt, after_stack_check_interrupt;
+  __ LoadP(r5,
+           StackLimitAsMemOperand(masm, StackLimitKind::kInterruptStackLimit));
+  __ CmpLogicalP(sp, r5);
+  __ blt(&stack_check_interrupt);
+  __ bind(&after_stack_check_interrupt);
+
   // The accumulator is already loaded with undefined.
 
   // Load the dispatch table into a register and dispatch to the bytecode
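The TODO suggests folding the two polls into one compare. Since jslimit either
equals real_jslimit or is raised when an interrupt is pending, a single check
against jslimit would catch both conditions, with Runtime::kStackGuard left to
distinguish a true overflow from an interrupt. A speculative sketch of that
merged check (not part of this commit):

  #include <cstdint>

  enum class Outcome { kProceed, kCallStackGuard };

  Outcome MergedEntryCheck(uintptr_t sp, uintptr_t frame_size,
                           uintptr_t jslimit) {
    if (sp - frame_size < jslimit) {
      // Runtime::kStackGuard then decides: real overflow -> throw,
      // pending interrupt -> service it and resume.
      return Outcome::kCallStackGuard;
    }
    return Outcome::kProceed;
  }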
@@ -1255,6 +1273,31 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
   // Fall through if there's no runnable optimized code.
   __ jmp(&not_optimized);
 
+  __ bind(&stack_check_interrupt);
+  // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+  // for the call to the StackGuard.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                              kFunctionEntryBytecodeOffset)));
+  __ StoreP(kInterpreterBytecodeOffsetRegister,
+            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  __ CallRuntime(Runtime::kStackGuard);
+
+  // After the call, restore the bytecode array, bytecode offset and accumulator
+  // registers again. Also, restore the bytecode offset in the stack to its
+  // previous value.
+  __ LoadP(kInterpreterBytecodeArrayRegister,
+           MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+  __ SmiTag(r5, kInterpreterBytecodeOffsetRegister);
+  __ StoreP(r5,
+            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+  __ jmp(&after_stack_check_interrupt);
+
   __ bind(&maybe_has_optimized_code);
   // Load code entry from the weak reference, if it was cleared, resume
   // execution of unoptimized code.
@@ -1448,6 +1491,15 @@ static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
+  if (FLAG_debug_code) {
+    Label okay;
+    __ CmpP(kInterpreterBytecodeOffsetRegister,
+            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+    __ bge(&okay);
+    __ bkpt(0);
+    __ bind(&okay);
+  }
+
   // Dispatch to the target bytecode.
   UseScratchRegisterScope temps(masm);
   Register scratch = temps.Acquire();
@@ -1467,6 +1519,12 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
            MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   __ SmiUntag(kInterpreterBytecodeOffsetRegister);
 
+  Label enter_bytecode, function_entry_bytecode;
+  __ CmpP(kInterpreterBytecodeOffsetRegister,
+          Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                  kFunctionEntryBytecodeOffset));
+  __ beq(&function_entry_bytecode);
+
   // Load the current bytecode.
   __ LoadlB(r3, MemOperand(kInterpreterBytecodeArrayRegister,
                            kInterpreterBytecodeOffsetRegister));
@@ -1477,6 +1535,7 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
                                 kInterpreterBytecodeOffsetRegister, r3, r4,
                                 &if_return);
 
+  __ bind(&enter_bytecode);
   // Convert new bytecode offset to a Smi and save in the stackframe.
   __ SmiTag(r4, kInterpreterBytecodeOffsetRegister);
   __ StoreP(r4,
@@ -1484,6 +1543,15 @@ void Builtins::Generate_InterpreterEnterBytecodeAdvance(MacroAssembler* masm) {
 
   Generate_InterpreterEnterBytecode(masm);
 
+  __ bind(&function_entry_bytecode);
+  // If the code deoptimizes during the implicit function entry stack interrupt
+  // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+  // not a valid bytecode offset. Detect this case and advance to the first
+  // actual bytecode.
+  __ mov(kInterpreterBytecodeOffsetRegister,
+         Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+  __ b(&enter_bytecode);
+
   // We should never take the if_return path.
   __ bind(&if_return);
   __ Abort(AbortReason::kInvalidBytecodeAdvance);
@@ -2140,7 +2208,8 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
     // Check the stack for overflow. We are not trying to catch interruptions
    // (i.e. debug break and preemption) here, so check the "real stack
     // limit".
-    __ CmpLogicalP(sp, RealStackLimitAsMemOperand(masm));
+    __ CmpLogicalP(
+        sp, StackLimitAsMemOperand(masm, StackLimitKind::kRealStackLimit));
     __ bgt(&done);  // Signed comparison.
     // Restore the stack pointer.
     __ LoadRR(sp, scratch);
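The bound-arguments path adjusts sp speculatively before this check; when the
compare fails, it restores sp from the saved copy (the LoadRR above) before
raising the stack-overflow error. The pattern, in outline (hypothetical names):

  #include <cstdint>

  // Illustrative shape of Generate_PushBoundArguments' stack probe.
  bool TryReserveBoundArgs(uintptr_t* sp, uintptr_t bytes,
                           uintptr_t real_jslimit) {
    uintptr_t saved_sp = *sp;
    *sp -= bytes;                         // speculatively make room
    if (*sp > real_jslimit) return true;  // fits: keep the new sp
    *sp = saved_sp;                       // roll back, then report overflow
    return false;
  }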