[TurboProp] Avoid marking the output of a call live in its catch handler
The output of a call won't be live if an exception is thrown while the
call is on the stack and we unwind to a catch handler.

BUG=chromium:1138075,v8:9684
Change-Id: I95bf535bac388940869eb213e25565d64fe96df1
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2476317
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#70562}
Parent: cb802efb04
Commit: cdc8d9a5ec
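The failure mode, in miniature: with --turboprop, the mid-tier register allocator treated a call's output as live in the call's catch handler, even though the output only exists on the non-exceptional path. A sketch of the triggering pattern, adapted from the regress-1138075.js test added at the end of this change (the function name is illustrative; the self-recursion is just a reliable way to make an optimized call throw):

    function callThatMayThrow() {
      try {
        // Recurse until a RangeError unwinds the stack.
        return callThatMayThrow();
      } catch (e) {
        // The call above produced no output on this path: the handler is
        // only reached when the call threw, so its result must not be
        // considered live here.
        return 0;
      }
    }

The fix below records, per virtual register, whether it is defined by a call that has an exception handler, and anchors its spill range in the IfSuccess successor block rather than in the block containing the call.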
@@ -755,7 +755,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ CallCodeObject(reg);
       }
@@ -797,7 +797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ JumpCodeObject(reg);
       }
@@ -825,7 +825,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!instr->InputAt(0)->IsImmediate());
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
       __ Jump(reg);
       unwinding_info_writer_.MarkBlockWillExit();
@@ -691,7 +691,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ CallCodeObject(reg);
       }
@@ -732,7 +732,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ JumpCodeObject(reg);
       }
@@ -762,7 +762,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!instr->InputAt(0)->IsImmediate());
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
       UseScratchRegisterScope temps(tasm());
       temps.Exclude(x17);
@@ -257,17 +257,6 @@ class OutOfLineCode : public ZoneObject {
   OutOfLineCode* const next_;
 };
 
-inline bool HasCallDescriptorFlag(Instruction* instr,
-                                  CallDescriptor::Flag flag) {
-  STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
-#ifdef DEBUG
-  static constexpr int kInstructionCodeFlagsMask =
-      ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
-  DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
-#endif
-  return MiscField::decode(instr->opcode()) & flag;
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
@@ -974,12 +974,12 @@ Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
 
 void CodeGenerator::RecordCallPosition(Instruction* instr) {
   const bool needs_frame_state =
-      HasCallDescriptorFlag(instr, CallDescriptor::kNeedsFrameState);
+      instr->HasCallDescriptorFlag(CallDescriptor::kNeedsFrameState);
   RecordSafepoint(instr->reference_map(), needs_frame_state
                                               ? Safepoint::kLazyDeopt
                                               : Safepoint::kNoLazyDeopt);
 
-  if (HasCallDescriptorFlag(instr, CallDescriptor::kHasExceptionHandler)) {
+  if (instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler)) {
     InstructionOperandConverter i(this, instr);
     RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
     DCHECK(instructions()->InstructionBlockAt(handler_rpo)->IsHandler());
@@ -695,10 +695,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
           __ RetpolineCall(reg);
         } else {
           __ call(reg);
@@ -723,7 +723,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
         __ wasm_call(wasm_code, constant.rmode());
       } else {
-        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
           __ RetpolineCall(wasm_code, constant.rmode());
         } else {
           __ call(wasm_code, constant.rmode());
@@ -731,7 +731,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
     } else {
       Register reg = i.InputRegister(0);
-      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
         __ RetpolineCall(reg);
       } else {
         __ call(reg);
@@ -753,10 +753,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
           __ RetpolineJump(reg);
         } else {
           __ jmp(reg);
@@ -773,7 +773,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ jmp(wasm_code, constant.rmode());
     } else {
       Register reg = i.InputRegister(0);
-      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
         __ RetpolineJump(reg);
       } else {
         __ jmp(reg);
@@ -787,9 +787,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
        __ RetpolineJump(reg);
       } else {
        __ jmp(reg);
@@ -63,104 +63,108 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
 
 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
-#define COMMON_ARCH_OPCODE_LIST(V) \
-  /* Tail call opcodes are grouped together to make IsTailCall fast */ \
-  V(ArchTailCallCodeObjectFromJSFunction) \
-  V(ArchTailCallCodeObject) \
-  V(ArchTailCallAddress) \
-  V(ArchTailCallWasm) \
-  /* Update IsTailCall if further TailCall opcodes are added */ \
-  \
-  V(ArchCallCodeObject) \
-  V(ArchCallJSFunction) \
-  V(ArchPrepareCallCFunction) \
-  V(ArchSaveCallerRegisters) \
-  V(ArchRestoreCallerRegisters) \
-  V(ArchCallCFunction) \
-  V(ArchPrepareTailCall) \
-  V(ArchCallWasmFunction) \
-  V(ArchCallBuiltinPointer) \
+#define COMMON_ARCH_OPCODE_LIST(V) \
+  /* Tail call opcodes are grouped together to make IsTailCall fast */ \
+  /* and Arch call opcodes are grouped together to make */ \
+  /* IsCallWithDescriptorFlags fast */ \
+  V(ArchTailCallCodeObjectFromJSFunction) \
+  V(ArchTailCallCodeObject) \
+  V(ArchTailCallAddress) \
+  V(ArchTailCallWasm) \
+  /* Update IsTailCall if further TailCall opcodes are added */ \
+  \
+  V(ArchCallCodeObject) \
+  V(ArchCallJSFunction) \
+  V(ArchCallWasmFunction) \
+  V(ArchCallBuiltinPointer) \
+  /* Update IsCallWithDescriptorFlags if further Call opcodes are added */ \
+  \
+  V(ArchPrepareCallCFunction) \
+  V(ArchSaveCallerRegisters) \
+  V(ArchRestoreCallerRegisters) \
+  V(ArchCallCFunction) \
+  V(ArchPrepareTailCall) \
   V(ArchJmp) \
   V(ArchBinarySearchSwitch) \
   V(ArchTableSwitch) \
   V(ArchNop) \
   V(ArchAbortCSAAssert) \
   V(ArchDebugBreak) \
   V(ArchComment) \
   V(ArchThrowTerminator) \
   V(ArchDeoptimize) \
   V(ArchRet) \
   V(ArchFramePointer) \
   V(ArchParentFramePointer) \
   V(ArchTruncateDoubleToI) \
   V(ArchStoreWithWriteBarrier) \
   V(ArchStackSlot) \
   V(ArchWordPoisonOnSpeculation) \
   V(ArchStackPointerGreaterThan) \
   V(ArchStackCheckOffset) \
   V(Word32AtomicLoadInt8) \
   V(Word32AtomicLoadUint8) \
   V(Word32AtomicLoadInt16) \
   V(Word32AtomicLoadUint16) \
   V(Word32AtomicLoadWord32) \
   V(Word32AtomicStoreWord8) \
   V(Word32AtomicStoreWord16) \
   V(Word32AtomicStoreWord32) \
   V(Word32AtomicExchangeInt8) \
   V(Word32AtomicExchangeUint8) \
   V(Word32AtomicExchangeInt16) \
   V(Word32AtomicExchangeUint16) \
   V(Word32AtomicExchangeWord32) \
   V(Word32AtomicCompareExchangeInt8) \
   V(Word32AtomicCompareExchangeUint8) \
   V(Word32AtomicCompareExchangeInt16) \
   V(Word32AtomicCompareExchangeUint16) \
   V(Word32AtomicCompareExchangeWord32) \
   V(Word32AtomicAddInt8) \
   V(Word32AtomicAddUint8) \
   V(Word32AtomicAddInt16) \
   V(Word32AtomicAddUint16) \
   V(Word32AtomicAddWord32) \
   V(Word32AtomicSubInt8) \
   V(Word32AtomicSubUint8) \
   V(Word32AtomicSubInt16) \
   V(Word32AtomicSubUint16) \
   V(Word32AtomicSubWord32) \
   V(Word32AtomicAndInt8) \
   V(Word32AtomicAndUint8) \
   V(Word32AtomicAndInt16) \
   V(Word32AtomicAndUint16) \
   V(Word32AtomicAndWord32) \
   V(Word32AtomicOrInt8) \
   V(Word32AtomicOrUint8) \
   V(Word32AtomicOrInt16) \
   V(Word32AtomicOrUint16) \
   V(Word32AtomicOrWord32) \
   V(Word32AtomicXorInt8) \
   V(Word32AtomicXorUint8) \
   V(Word32AtomicXorInt16) \
   V(Word32AtomicXorUint16) \
   V(Word32AtomicXorWord32) \
   V(Ieee754Float64Acos) \
   V(Ieee754Float64Acosh) \
   V(Ieee754Float64Asin) \
   V(Ieee754Float64Asinh) \
   V(Ieee754Float64Atan) \
   V(Ieee754Float64Atanh) \
   V(Ieee754Float64Atan2) \
   V(Ieee754Float64Cbrt) \
   V(Ieee754Float64Cos) \
   V(Ieee754Float64Cosh) \
   V(Ieee754Float64Exp) \
   V(Ieee754Float64Expm1) \
   V(Ieee754Float64Log) \
   V(Ieee754Float64Log1p) \
   V(Ieee754Float64Log10) \
   V(Ieee754Float64Log2) \
   V(Ieee754Float64Pow) \
   V(Ieee754Float64Sin) \
   V(Ieee754Float64Sinh) \
   V(Ieee754Float64Tan) \
   V(Ieee754Float64Tanh)
 
 #define ARCH_OPCODE_LIST(V) \
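The grouping comments above do real work: because the call-like opcodes form one contiguous run at the head of the enum this macro generates, predicates such as IsTailCall and the new IsCallWithDescriptorFlags compile down to a single range comparison instead of a switch over every call opcode. A minimal sketch of the trick, with hypothetical opcode names rather than V8's:

    enum class Op { CallA, CallB, CallC, Add, Sub };  // call-like ops first

    // Contiguous grouping makes the predicate one comparison.
    constexpr bool IsCallLike(Op op) { return op <= Op::CallC; }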
@@ -2731,6 +2731,7 @@ constexpr InstructionCode EncodeCallDescriptorFlags(
   // Note: Not all bits of `flags` are preserved.
   STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode ==
                 MiscField::kSize);
+  DCHECK(Instruction::IsCallWithDescriptorFlags(opcode));
   return opcode | MiscField::encode(flags & MiscField::kMax);
 }
 
@@ -927,6 +927,23 @@ class V8_EXPORT_PRIVATE Instruction final {
     return arch_opcode() == ArchOpcode::kArchThrowTerminator;
   }
 
+  static constexpr bool IsCallWithDescriptorFlags(InstructionCode arch_opcode) {
+    return arch_opcode <= ArchOpcode::kArchCallBuiltinPointer;
+  }
+  bool IsCallWithDescriptorFlags() const {
+    return IsCallWithDescriptorFlags(arch_opcode());
+  }
+  bool HasCallDescriptorFlag(CallDescriptor::Flag flag) const {
+    DCHECK(IsCallWithDescriptorFlags());
+    STATIC_ASSERT(CallDescriptor::kFlagsBitsEncodedInInstructionCode == 10);
+#ifdef DEBUG
+    static constexpr int kInstructionCodeFlagsMask =
+        ((1 << CallDescriptor::kFlagsBitsEncodedInInstructionCode) - 1);
+    DCHECK_EQ(static_cast<int>(flag) & kInstructionCodeFlagsMask, flag);
+#endif
+    return MiscField::decode(opcode()) & flag;
+  }
+
   enum GapPosition {
     START,
     END,
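For intuition on what this member relies on: a 10-bit field of the instruction word (MiscField in the diff) is reused to carry CallDescriptor flags for call instructions, so querying a flag is a decode-and-mask. A self-contained sketch of the round trip under simplified layout assumptions (flags sit in the low bits here, whereas V8 places them in MiscField's bit range, and the flag names below are hypothetical):

    #include <cassert>
    #include <cstdint>

    using InstructionCode = uint32_t;
    constexpr int kFlagBits = 10;  // matches the STATIC_ASSERT above
    constexpr InstructionCode kFlagsMask = (1u << kFlagBits) - 1;

    // Hypothetical flag values; V8's real ones come from CallDescriptor::Flag.
    constexpr uint32_t kNeedsFrameState = 1u << 0;
    constexpr uint32_t kHasExceptionHandler = 1u << 1;

    constexpr InstructionCode Encode(InstructionCode opcode, uint32_t flags) {
      return opcode | (flags & kFlagsMask);  // flags ride in the reserved field
    }
    constexpr bool HasFlag(InstructionCode code, uint32_t flag) {
      return (code & flag) != 0;
    }

    int main() {
      InstructionCode code = Encode(0, kHasExceptionHandler);
      assert(HasFlag(code, kHasExceptionHandler));
      assert(!HasFlag(code, kNeedsFrameState));
    }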
@@ -168,8 +168,7 @@ const InstructionBlock* MidTierRegisterAllocationData::GetBlock(
 }
 
 const BitVector* MidTierRegisterAllocationData::GetBlocksDominatedBy(
-    int instr_index) {
-  const InstructionBlock* block = GetBlock(instr_index);
+    const InstructionBlock* block) {
   return block_state(block->rpo_number()).dominated_blocks();
 }
 
@@ -283,10 +282,12 @@ class VirtualRegisterData final {
   // Define VirtualRegisterData with the type of output that produces this
   // virtual register.
   void DefineAsUnallocatedOperand(int virtual_register, int instr_index,
-                                  bool is_deferred_block);
+                                  bool is_deferred_block,
+                                  bool is_exceptional_call_output);
   void DefineAsFixedSpillOperand(AllocatedOperand* operand,
                                  int virtual_register, int instr_index,
-                                 bool is_deferred_block);
+                                 bool is_deferred_block,
+                                 bool is_exceptional_call_output);
   void DefineAsConstantOperand(ConstantOperand* operand, int instr_index,
                                bool is_deferred_block);
   void DefineAsPhi(int virtual_register, int instr_index,
@@ -364,6 +365,9 @@ class VirtualRegisterData final {
   bool is_defined_in_deferred_block() const {
     return is_defined_in_deferred_block_;
   }
+  bool is_exceptional_call_output() const {
+    return is_exceptional_call_output_;
+  }
 
   struct DeferredSpillSlotOutput {
    public:
@@ -381,9 +385,11 @@ class VirtualRegisterData final {
   class SpillRange : public ZoneObject {
    public:
     // Defines a spill range for an output operand.
-    SpillRange(int definition_instr_index, MidTierRegisterAllocationData* data)
+    SpillRange(int definition_instr_index,
+               const InstructionBlock* definition_block,
+               MidTierRegisterAllocationData* data)
         : live_range_(definition_instr_index, definition_instr_index),
-          live_blocks_(data->GetBlocksDominatedBy(definition_instr_index)),
+          live_blocks_(data->GetBlocksDominatedBy(definition_block)),
           deferred_spill_outputs_(nullptr) {}
 
     // Defines a spill range for a Phi variable.
@@ -391,8 +397,7 @@ class VirtualRegisterData final {
                MidTierRegisterAllocationData* data)
         : live_range_(phi_block->first_instruction_index(),
                       phi_block->first_instruction_index()),
-          live_blocks_(
-              data->GetBlocksDominatedBy(phi_block->first_instruction_index())),
+          live_blocks_(data->GetBlocksDominatedBy(phi_block)),
           deferred_spill_outputs_(nullptr) {
       // For phis, add the gap move instructions in the predecessor blocks to
       // the live range.
@@ -469,7 +474,8 @@ class VirtualRegisterData final {
  private:
   void Initialize(int virtual_register, InstructionOperand* spill_operand,
                   int instr_index, bool is_phi, bool is_constant,
-                  bool is_defined_in_deferred_block);
+                  bool is_defined_in_deferred_block,
+                  bool is_exceptional_call_output);
 
   void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
   void AddPendingSpillOperand(PendingOperand* pending_operand);
@@ -485,6 +491,7 @@ class VirtualRegisterData final {
   bool is_constant_ : 1;
   bool is_defined_in_deferred_block_ : 1;
   bool needs_spill_at_output_ : 1;
+  bool is_exceptional_call_output_ : 1;
 };
 
 VirtualRegisterData& MidTierRegisterAllocationData::VirtualRegisterDataFor(
@@ -498,7 +505,8 @@ void VirtualRegisterData::Initialize(int virtual_register,
                                      InstructionOperand* spill_operand,
                                      int instr_index, bool is_phi,
                                      bool is_constant,
-                                     bool is_defined_in_deferred_block) {
+                                     bool is_defined_in_deferred_block,
+                                     bool is_exceptional_call_output) {
   vreg_ = virtual_register;
   spill_operand_ = spill_operand;
   spill_range_ = nullptr;
@@ -507,34 +515,34 @@ void VirtualRegisterData::Initialize(int virtual_register,
   is_constant_ = is_constant;
   is_defined_in_deferred_block_ = is_defined_in_deferred_block;
   needs_spill_at_output_ = !is_constant_ && spill_operand_ != nullptr;
+  is_exceptional_call_output_ = is_exceptional_call_output;
 }
 
 void VirtualRegisterData::DefineAsConstantOperand(ConstantOperand* operand,
                                                   int instr_index,
                                                   bool is_deferred_block) {
   Initialize(operand->virtual_register(), operand, instr_index, false, true,
-             is_deferred_block);
+             is_deferred_block, false);
 }
 
-void VirtualRegisterData::DefineAsFixedSpillOperand(AllocatedOperand* operand,
-                                                    int virtual_register,
-                                                    int instr_index,
-                                                    bool is_deferred_block) {
+void VirtualRegisterData::DefineAsFixedSpillOperand(
+    AllocatedOperand* operand, int virtual_register, int instr_index,
+    bool is_deferred_block, bool is_exceptional_call_output) {
   Initialize(virtual_register, operand, instr_index, false, false,
-             is_deferred_block);
+             is_deferred_block, is_exceptional_call_output);
 }
 
-void VirtualRegisterData::DefineAsUnallocatedOperand(int virtual_register,
-                                                     int instr_index,
-                                                     bool is_deferred_block) {
+void VirtualRegisterData::DefineAsUnallocatedOperand(
+    int virtual_register, int instr_index, bool is_deferred_block,
+    bool is_exceptional_call_output) {
   Initialize(virtual_register, nullptr, instr_index, false, false,
-             is_deferred_block);
+             is_deferred_block, is_exceptional_call_output);
 }
 
 void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index,
                                       bool is_deferred_block) {
   Initialize(virtual_register, nullptr, instr_index, true, false,
-             is_deferred_block);
+             is_deferred_block, false);
 }
 
 void VirtualRegisterData::EnsureSpillRange(
@@ -542,16 +550,27 @@ void VirtualRegisterData::EnsureSpillRange(
   DCHECK(!is_constant());
   if (HasSpillRange()) return;
 
+  const InstructionBlock* definition_block =
+      data->GetBlock(output_instr_index_);
   if (is_phi()) {
     // Define a spill slot that is defined for the phi's range.
-    const InstructionBlock* definition_block =
-        data->code()->InstructionAt(output_instr_index_)->block();
     spill_range_ =
         data->allocation_zone()->New<SpillRange>(definition_block, data);
   } else {
+    if (is_exceptional_call_output()) {
+      // If this virtual register is output by a call which has an exception
+      // catch handler, then the output will only be live in the IfSuccess
+      // successor block, not the IfException side, so make the definition block
+      // the IfSuccess successor block explicitly.
+      DCHECK_EQ(output_instr_index_,
+                definition_block->last_instruction_index() - 1);
+      DCHECK_EQ(definition_block->SuccessorCount(), 2);
+      DCHECK(data->GetBlock(definition_block->successors()[1])->IsHandler());
+      definition_block = data->GetBlock(definition_block->successors()[0]);
+    }
     // The spill slot will be defined after the instruction that outputs it.
-    spill_range_ =
-        data->allocation_zone()->New<SpillRange>(output_instr_index_ + 1, data);
+    spill_range_ = data->allocation_zone()->New<SpillRange>(
+        output_instr_index_ + 1, definition_block, data);
   }
   data->spilled_virtual_registers().Add(vreg());
 }
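For intuition, the DCHECKs above spell out the block shape expected around a call with a handler (block numbers hypothetical): the call is the second-to-last instruction of its block, the block has exactly two successors, and the second successor is the handler.

    B1: ...
        v7 = ArchCallCodeObject ...        <- output_instr_index_
        (successors: B2 = IfSuccess, B3 = IfException)
    B2: the only block where v7 is live; used as the spill range's
        definition block so the range never covers the handler
    B3: catch handler; v7 was never produced on this path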
@@ -2575,6 +2594,7 @@ void MidTierOutputProcessor::InitializeBlockState(
 void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
   int block_start = block->first_instruction_index();
   bool is_deferred = block->IsDeferred();
+
   for (int index = block->last_instruction_index(); index >= block_start;
        index--) {
     Instruction* instr = code()->InstructionAt(index);
@@ -2593,6 +2613,9 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
         UnallocatedOperand* unallocated_operand =
             UnallocatedOperand::cast(output);
         int virtual_register = unallocated_operand->virtual_register();
+        bool is_exceptional_call_output =
+            instr->IsCallWithDescriptorFlags() &&
+            instr->HasCallDescriptorFlag(CallDescriptor::kHasExceptionHandler);
         if (unallocated_operand->HasFixedSlotPolicy()) {
           // If output has a fixed slot policy, allocate its spill operand now
           // so that the register allocator can use this knowledge.
@@ -2602,10 +2625,12 @@ void MidTierOutputProcessor::DefineOutputs(const InstructionBlock* block) {
               unallocated_operand->fixed_slot_index());
           VirtualRegisterDataFor(virtual_register)
               .DefineAsFixedSpillOperand(fixed_spill_operand, virtual_register,
-                                         index, is_deferred);
+                                         index, is_deferred,
+                                         is_exceptional_call_output);
         } else {
           VirtualRegisterDataFor(virtual_register)
-              .DefineAsUnallocatedOperand(virtual_register, index, is_deferred);
+              .DefineAsUnallocatedOperand(virtual_register, index, is_deferred,
+                                          is_exceptional_call_output);
         }
       }
     }
@@ -57,8 +57,8 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
   const InstructionBlock* GetBlock(int instr_index);
 
   // Returns a bitvector representing all the blocks that are dominated by the
-  // output of the instruction at |instr_index|.
-  const BitVector* GetBlocksDominatedBy(int instr_index);
+  // output of the instruction in |block|.
+  const BitVector* GetBlocksDominatedBy(const InstructionBlock* block);
 
   // List of all instruction indexes that require a reference map.
   ZoneVector<int>& reference_map_instructions() {
@@ -711,7 +711,7 @@ void AdjustStackPointerForTailCall(Instruction* instr,
                                    int new_slot_above_sp,
                                    bool allow_shrinkage = true) {
   int stack_slot_delta;
-  if (HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+  if (instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
     // For this special tail-call mode, the callee has the same arguments and
     // linkage as the caller, and arguments adapter frames must be preserved.
     // Thus we simply have to reset the stack pointer register to its original
@@ -757,7 +757,7 @@ void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
   if (!pushes.empty() &&
       (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
        first_unused_stack_slot)) {
-    DCHECK(!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp));
+    DCHECK(!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp));
     X64OperandConverter g(this, instr);
     for (auto move : pushes) {
       LocationOperand destination_location(
@@ -847,10 +847,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
           __ RetpolineCall(reg);
         } else {
           __ call(reg);
@@ -875,7 +875,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
         __ near_call(wasm_code, constant.rmode());
       } else {
-        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
          __ RetpolineCall(wasm_code, constant.rmode());
         } else {
          __ Call(wasm_code, constant.rmode());
@@ -883,7 +883,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
     } else {
       Register reg = i.InputRegister(0);
-      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
        __ RetpolineCall(reg);
       } else {
        __ call(reg);
@@ -894,7 +894,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kArchTailCallCodeObjectFromJSFunction:
-      if (!HasCallDescriptorFlag(instr, CallDescriptor::kIsTailCallForTierUp)) {
+      if (!instr->HasCallDescriptorFlag(CallDescriptor::kIsTailCallForTierUp)) {
         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                          i.TempRegister(0), i.TempRegister(1),
                                          i.TempRegister(2));
@@ -907,10 +907,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       } else {
         Register reg = i.InputRegister(0);
         DCHECK_IMPLIES(
-            HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+            instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
             reg == kJavaScriptCallCodeStartRegister);
         __ LoadCodeObjectEntry(reg, reg);
-        if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+        if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
           __ RetpolineJump(reg);
         } else {
           __ jmp(reg);
@@ -933,7 +933,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
     } else {
       Register reg = i.InputRegister(0);
-      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
        __ RetpolineJump(reg);
       } else {
        __ jmp(reg);
@@ -948,9 +948,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       CHECK(!HasImmediateInput(instr, 0));
       Register reg = i.InputRegister(0);
       DCHECK_IMPLIES(
-          HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+          instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
           reg == kJavaScriptCallCodeStartRegister);
-      if (HasCallDescriptorFlag(instr, CallDescriptor::kRetpoline)) {
+      if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
        __ RetpolineJump(reg);
       } else {
        __ jmp(reg);
test/mjsunit/regress/regress-1138075.js (new file)
@@ -0,0 +1,27 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --max-semi-space-size=1
+
+function runNearStackLimit(f) {
+  function t() {
+    try {
+      return t();
+    } catch (e) {
+      return f();
+    }
+  }
+  %PrepareFunctionForOptimization(t);
+  %OptimizeFunctionOnNextCall(t);
+  return t();
+}
+
+function foo(a) {}
+function bar(a, b) {}
+
+for (let i = 0; i < 150; i++) {
+  runNearStackLimit(() => {
+    return foo(bar(3, 4) === false);
+  });
+}
test/mjsunit/regress/regress-1138611.js (new file)
@@ -0,0 +1,34 @@
+// Copyright 2020 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turboprop --gc-interval=1000
+
+function runNearStackLimit(f) {
+  function t() {
+    try {
+      return t();
+    } catch (e) {
+      return f();
+    }
+  }
+  %PrepareFunctionForOptimization(t);
+  %OptimizeFunctionOnNextCall(t);
+  return t();
+}
+
+function foo() {
+  runNearStackLimit(() => {});
+}
+
+(function () {
+  var a = 42;
+  var b = 153;
+  try {
+    Object.defineProperty({});
+  } catch (e) {}
+  foo();
+  foo();
+})();
+
+runNearStackLimit(() => {});
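Assuming a local d8 build at the conventional output path (the path is not part of this change), the new regression tests can be exercised directly with the flags from their headers:

    out/x64.release/d8 --allow-natives-syntax --turboprop \
        --max-semi-space-size=1 test/mjsunit/regress/regress-1138075.js
    out/x64.release/d8 --allow-natives-syntax --turboprop \
        --gc-interval=1000 test/mjsunit/regress/regress-1138611.js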