diff --git a/BUILD.gn b/BUILD.gn index dbe8592086..93c899c27f 100644 --- a/BUILD.gn +++ b/BUILD.gn @@ -1723,6 +1723,7 @@ v8_compiler_sources = [ "src/compiler/frame.cc", "src/compiler/frame.h", "src/compiler/functional-list.h", + "src/compiler/globals.h", "src/compiler/graph-assembler.cc", "src/compiler/graph-assembler.h", "src/compiler/graph-reducer.cc", diff --git a/src/compiler/backend/arm/code-generator-arm.cc b/src/compiler/backend/arm/code-generator-arm.cc index cdc0ec1150..655b4333ef 100644 --- a/src/compiler/backend/arm/code-generator-arm.cc +++ b/src/compiler/backend/arm/code-generator-arm.cc @@ -937,11 +937,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kArchStackPointerGreaterThan: { + // Potentially apply an offset to the current stack pointer before the + // comparison to consider the size difference of an optimized frame versus + // the contained unoptimized frames. + + Register lhs_register = sp; + uint32_t offset; + + if (ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = i.TempRegister(0); + __ sub(lhs_register, sp, Operand(offset)); + } + constexpr size_t kValueIndex = 0; DCHECK(instr->InputAt(kValueIndex)->IsRegister()); - __ cmp(sp, i.InputRegister(kValueIndex)); + __ cmp(lhs_register, i.InputRegister(kValueIndex)); break; } + case kArchStackCheckOffset: + __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); + break; case kArchTruncateDoubleToI: __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), i.InputDoubleRegister(0), DetermineStubCallMode()); diff --git a/src/compiler/backend/arm/instruction-selector-arm.cc b/src/compiler/backend/arm/instruction-selector-arm.cc index 365ad53711..2c0af6581e 100644 --- a/src/compiler/backend/arm/instruction-selector-arm.cc +++ b/src/compiler/backend/arm/instruction-selector-arm.cc @@ -909,11 +909,31 @@ void InstructionSelector::VisitWord32Xor(Node* node) { void InstructionSelector::VisitStackPointerGreaterThan( Node* node, FlagsContinuation* cont) { - Node* const value = node->InputAt(0); - InstructionCode opcode = kArchStackPointerGreaterThan; + StackCheckKind kind = StackCheckKindOf(node->op()); + InstructionCode opcode = + kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); ArmOperandGenerator g(this); - EmitWithContinuation(opcode, g.UseRegister(value), cont); + + // No outputs. + InstructionOperand* const outputs = nullptr; + const int output_count = 0; + + // Applying an offset to this stack check requires a temp register. Offsets + // are only applied to the first stack check. If applying an offset, we must + // ensure the input and temp registers do not alias, thus kUniqueRegister. + InstructionOperand temps[] = {g.TempRegister()}; + const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0; + const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) + ? 
OperandGenerator::kUniqueRegister + : OperandGenerator::kRegister; + + Node* const value = node->InputAt(0); + InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; + static constexpr int input_count = arraysize(inputs); + + EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, + temp_count, temps, cont); } namespace { diff --git a/src/compiler/backend/arm64/code-generator-arm64.cc b/src/compiler/backend/arm64/code-generator-arm64.cc index fa6ffc5cf2..769aab9453 100644 --- a/src/compiler/backend/arm64/code-generator-arm64.cc +++ b/src/compiler/backend/arm64/code-generator-arm64.cc @@ -869,11 +869,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kArchStackPointerGreaterThan: { + // Potentially apply an offset to the current stack pointer before the + // comparison to consider the size difference of an optimized frame versus + // the contained unoptimized frames. + + Register lhs_register = sp; + uint32_t offset; + + if (ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = i.TempRegister(0); + __ Sub(lhs_register, sp, offset); + } + constexpr size_t kValueIndex = 0; DCHECK(instr->InputAt(kValueIndex)->IsRegister()); - __ Cmp(sp, i.InputRegister(kValueIndex)); + __ Cmp(lhs_register, i.InputRegister(kValueIndex)); break; } + case kArchStackCheckOffset: + __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); + break; case kArchTruncateDoubleToI: __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(), i.InputDoubleRegister(0), DetermineStubCallMode()); diff --git a/src/compiler/backend/arm64/instruction-selector-arm64.cc b/src/compiler/backend/arm64/instruction-selector-arm64.cc index e0ec57aa18..7e2a510a21 100644 --- a/src/compiler/backend/arm64/instruction-selector-arm64.cc +++ b/src/compiler/backend/arm64/instruction-selector-arm64.cc @@ -1028,11 +1028,31 @@ void InstructionSelector::VisitWord64Shl(Node* node) { void InstructionSelector::VisitStackPointerGreaterThan( Node* node, FlagsContinuation* cont) { - Node* const value = node->InputAt(0); - InstructionCode opcode = kArchStackPointerGreaterThan; + StackCheckKind kind = StackCheckKindOf(node->op()); + InstructionCode opcode = + kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); Arm64OperandGenerator g(this); - EmitWithContinuation(opcode, g.UseRegister(value), cont); + + // No outputs. + InstructionOperand* const outputs = nullptr; + const int output_count = 0; + + // Applying an offset to this stack check requires a temp register. Offsets + // are only applied to the first stack check. If applying an offset, we must + // ensure the input and temp registers do not alias, thus kUniqueRegister. + InstructionOperand temps[] = {g.TempRegister()}; + const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0; + const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) + ? 
OperandGenerator::kUniqueRegister : OperandGenerator::kRegister; + + Node* const value = node->InputAt(0); + InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; + static constexpr int input_count = arraysize(inputs); + + EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, + temp_count, temps, cont); } namespace { diff --git a/src/compiler/backend/code-generator.cc b/src/compiler/backend/code-generator.cc index 43eb4a1f15..b5c0912cf1 100644 --- a/src/compiler/backend/code-generator.cc +++ b/src/compiler/backend/code-generator.cc @@ -10,6 +10,7 @@ #include "src/codegen/optimized-compilation-info.h" #include "src/codegen/string-constants.h" #include "src/compiler/backend/code-generator-impl.h" +#include "src/compiler/globals.h" #include "src/compiler/linkage.h" #include "src/compiler/pipeline.h" #include "src/compiler/wasm-compiler.h" @@ -115,6 +116,32 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) { frame_access_state_ = new (zone()) FrameAccessState(frame); } +bool CodeGenerator::ShouldApplyOffsetToStackCheck(Instruction* instr, + uint32_t* offset) { + DCHECK_EQ(instr->arch_opcode(), kArchStackPointerGreaterThan); + + StackCheckKind kind = + static_cast<StackCheckKind>(MiscField::decode(instr->opcode())); + if (kind != StackCheckKind::kJSFunctionEntry) return false; + + uint32_t stack_check_offset = *offset = GetStackCheckOffset(); + return stack_check_offset > 0; +} + +uint32_t CodeGenerator::GetStackCheckOffset() { + if (!frame_access_state()->has_frame()) return 0; + + int32_t optimized_frame_height = + frame()->GetTotalFrameSlotCount() * kSystemPointerSize; + DCHECK(is_int32(max_unoptimized_frame_height_)); + int32_t signed_max_unoptimized_frame_height = + static_cast<int32_t>(max_unoptimized_frame_height_); + + int32_t signed_offset = + std::max(signed_max_unoptimized_frame_height - optimized_frame_height, 0); + return (signed_offset <= 0) ? 0 : static_cast<uint32_t>(signed_offset); +} + CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall( DeoptimizationExit* exit) { int deoptimization_id = exit->deoptimization_id(); diff --git a/src/compiler/backend/code-generator.h b/src/compiler/backend/code-generator.h index d56b1edae0..a2e6382cab 100644 --- a/src/compiler/backend/code-generator.h +++ b/src/compiler/backend/code-generator.h @@ -171,6 +171,13 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler { static constexpr int kBinarySearchSwitchMinimalCases = 4; + // Returns true if an offset should be applied to the given stack check. This + // is the case for stack checks on function-entry when the offset is non-zero, + // where the offset is the difference between the size of optimized and + // corresponding deoptimized frames.
+ bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset); + uint32_t GetStackCheckOffset(); + private: GapResolver* resolver() { return &resolver_; } SafepointTableBuilder* safepoints() { return &safepoints_; } diff --git a/src/compiler/backend/ia32/code-generator-ia32.cc b/src/compiler/backend/ia32/code-generator-ia32.cc index 3c114be25c..b731001361 100644 --- a/src/compiler/backend/ia32/code-generator-ia32.cc +++ b/src/compiler/backend/ia32/code-generator-ia32.cc @@ -924,14 +924,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kArchStackPointerGreaterThan: { + // Potentially apply an offset to the current stack pointer before the + // comparison to consider the size difference of an optimized frame versus + // the contained unoptimized frames. + Register lhs_register = esp; + uint32_t offset; + + if (ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = i.TempRegister(0); + __ lea(lhs_register, Operand(esp, -1 * static_cast(offset))); + } + constexpr size_t kValueIndex = 0; if (HasAddressingMode(instr)) { - __ cmp(esp, i.MemoryOperand(kValueIndex)); + __ cmp(lhs_register, i.MemoryOperand(kValueIndex)); } else { - __ cmp(esp, i.InputRegister(kValueIndex)); + __ cmp(lhs_register, i.InputRegister(kValueIndex)); } break; } + case kArchStackCheckOffset: + __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); + break; case kArchTruncateDoubleToI: { auto result = i.OutputRegister(); auto input = i.InputDoubleRegister(0); diff --git a/src/compiler/backend/ia32/instruction-selector-ia32.cc b/src/compiler/backend/ia32/instruction-selector-ia32.cc index 3687cdcd51..4fa42f8485 100644 --- a/src/compiler/backend/ia32/instruction-selector-ia32.cc +++ b/src/compiler/backend/ia32/instruction-selector-ia32.cc @@ -86,11 +86,10 @@ class IA32OperandGenerator final : public OperandGenerator { } } - AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base, - Node* displacement_node, - DisplacementMode displacement_mode, - InstructionOperand inputs[], - size_t* input_count) { + AddressingMode GenerateMemoryOperandInputs( + Node* index, int scale, Node* base, Node* displacement_node, + DisplacementMode displacement_mode, InstructionOperand inputs[], + size_t* input_count, RegisterMode register_mode = kRegister) { AddressingMode mode = kMode_MRI; int32_t displacement = (displacement_node == nullptr) ? 
0 @@ -105,10 +104,10 @@ class IA32OperandGenerator final : public OperandGenerator { } } if (base != nullptr) { - inputs[(*input_count)++] = UseRegister(base); + inputs[(*input_count)++] = UseRegisterWithMode(base, register_mode); if (index != nullptr) { DCHECK(scale >= 0 && scale <= 3); - inputs[(*input_count)++] = UseRegister(index); + inputs[(*input_count)++] = UseRegisterWithMode(index, register_mode); if (displacement != 0) { inputs[(*input_count)++] = TempImmediate(displacement); static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I, @@ -130,7 +129,7 @@ class IA32OperandGenerator final : public OperandGenerator { } else { DCHECK(scale >= 0 && scale <= 3); if (index != nullptr) { - inputs[(*input_count)++] = UseRegister(index); + inputs[(*input_count)++] = UseRegisterWithMode(index, register_mode); if (displacement != 0) { inputs[(*input_count)++] = TempImmediate(displacement); static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I, @@ -149,9 +148,9 @@ class IA32OperandGenerator final : public OperandGenerator { return mode; } - AddressingMode GetEffectiveAddressMemoryOperand(Node* node, - InstructionOperand inputs[], - size_t* input_count) { + AddressingMode GetEffectiveAddressMemoryOperand( + Node* node, InstructionOperand inputs[], size_t* input_count, + RegisterMode register_mode = kRegister) { { LoadMatcher m(node); if (m.index().HasValue() && m.object().HasValue() && @@ -172,10 +171,12 @@ class IA32OperandGenerator final : public OperandGenerator { if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) { return GenerateMemoryOperandInputs( m.index(), m.scale(), m.base(), m.displacement(), - m.displacement_mode(), inputs, input_count); + m.displacement_mode(), inputs, input_count, register_mode); } else { - inputs[(*input_count)++] = UseRegister(node->InputAt(0)); - inputs[(*input_count)++] = UseRegister(node->InputAt(1)); + inputs[(*input_count)++] = + UseRegisterWithMode(node->InputAt(0), register_mode); + inputs[(*input_count)++] = + UseRegisterWithMode(node->InputAt(1), register_mode); return kMode_MR1; } } @@ -577,8 +578,9 @@ void InstructionSelector::VisitWord32Xor(Node* node) { void InstructionSelector::VisitStackPointerGreaterThan( Node* node, FlagsContinuation* cont) { - Node* const value = node->InputAt(0); - InstructionCode opcode = kArchStackPointerGreaterThan; + StackCheckKind kind = StackCheckKindOf(node->op()); + InstructionCode opcode = + kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); int effect_level = GetEffectLevel(node); if (cont->IsBranch()) { @@ -587,6 +589,21 @@ void InstructionSelector::VisitStackPointerGreaterThan( } IA32OperandGenerator g(this); + + // No outputs. + InstructionOperand* const outputs = nullptr; + const int output_count = 0; + + // Applying an offset to this stack check requires a temp register. Offsets + // are only applied to the first stack check. If applying an offset, we must + // ensure the input and temp registers do not alias, thus kUniqueRegister. + InstructionOperand temps[] = {g.TempRegister()}; + const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0; + const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry) + ? 
OperandGenerator::kUniqueRegister + : OperandGenerator::kRegister; + + Node* const value = node->InputAt(0); if (g.CanBeMemoryOperand(kIA32Cmp, node, value, effect_level)) { DCHECK_EQ(IrOpcode::kLoad, value->opcode()); @@ -595,14 +612,18 @@ void InstructionSelector::VisitStackPointerGreaterThan( size_t input_count = 0; InstructionOperand inputs[kMaxInputCount]; - AddressingMode addressing_mode = - g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count); + AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand( + value, inputs, &input_count, register_mode); opcode |= AddressingModeField::encode(addressing_mode); DCHECK_LE(input_count, kMaxInputCount); - EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont); + EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, + temp_count, temps, cont); } else { - EmitWithContinuation(opcode, g.UseRegister(value), cont); + InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)}; + static constexpr int input_count = arraysize(inputs); + EmitWithContinuation(opcode, output_count, outputs, input_count, inputs, + temp_count, temps, cont); } } diff --git a/src/compiler/backend/instruction-codes.h b/src/compiler/backend/instruction-codes.h index b76946ce47..9975f1ed9d 100644 --- a/src/compiler/backend/instruction-codes.h +++ b/src/compiler/backend/instruction-codes.h @@ -95,6 +95,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode( V(ArchStackSlot) \ V(ArchWordPoisonOnSpeculation) \ V(ArchStackPointerGreaterThan) \ + V(ArchStackCheckOffset) \ V(Word32AtomicLoadInt8) \ V(Word32AtomicLoadUint8) \ V(Word32AtomicLoadInt16) \ diff --git a/src/compiler/backend/instruction-scheduler.cc b/src/compiler/backend/instruction-scheduler.cc index e19c4b0b26..d811ea5752 100644 --- a/src/compiler/backend/instruction-scheduler.cc +++ b/src/compiler/backend/instruction-scheduler.cc @@ -248,6 +248,7 @@ void InstructionScheduler::Schedule() { int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const { switch (instr->arch_opcode()) { case kArchNop: + case kArchStackCheckOffset: case kArchFramePointer: case kArchParentFramePointer: case kArchStackSlot: // Despite its name this opcode will produce a diff --git a/src/compiler/backend/instruction-selector-impl.h b/src/compiler/backend/instruction-selector-impl.h index 13ea049eba..a8ae2a5dc4 100644 --- a/src/compiler/backend/instruction-selector-impl.h +++ b/src/compiler/backend/instruction-selector-impl.h @@ -241,6 +241,19 @@ class OperandGenerator { UnallocatedOperand::USED_AT_START, vreg); } + // The kind of register generated for memory operands. kRegister is alive + // until the start of the operation, kUniqueRegister until the end. + enum RegisterMode { + kRegister, + kUniqueRegister, + }; + + InstructionOperand UseRegisterWithMode(Node* node, + RegisterMode register_mode) { + return register_mode == kRegister ? 
UseRegister(node) + : UseUniqueRegister(node); + } + InstructionOperand TempDoubleRegister() { UnallocatedOperand op = UnallocatedOperand( UnallocatedOperand::MUST_HAVE_REGISTER, diff --git a/src/compiler/backend/instruction-selector.cc b/src/compiler/backend/instruction-selector.cc index 3607cd31c6..1855eba975 100644 --- a/src/compiler/backend/instruction-selector.cc +++ b/src/compiler/backend/instruction-selector.cc @@ -42,6 +42,7 @@ InstructionSelector::InstructionSelector( instructions_(zone), continuation_inputs_(sequence->zone()), continuation_outputs_(sequence->zone()), + continuation_temps_(sequence->zone()), defined_(node_count, false, zone), used_(node_count, false, zone), effect_level_(node_count, 0, zone), @@ -723,6 +724,14 @@ Instruction* InstructionSelector::EmitWithContinuation( Instruction* InstructionSelector::EmitWithContinuation( InstructionCode opcode, size_t output_count, InstructionOperand* outputs, size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) { + return EmitWithContinuation(opcode, output_count, outputs, input_count, + inputs, 0, nullptr, cont); +} + +Instruction* InstructionSelector::EmitWithContinuation( + InstructionCode opcode, size_t output_count, InstructionOperand* outputs, + size_t input_count, InstructionOperand* inputs, size_t temp_count, + InstructionOperand* temps, FlagsContinuation* cont) { OperandGenerator g(this); opcode = cont->Encode(opcode); @@ -737,6 +746,11 @@ Instruction* InstructionSelector::EmitWithContinuation( continuation_outputs_.push_back(outputs[i]); } + continuation_temps_.resize(0); + for (size_t i = 0; i < temp_count; i++) { + continuation_temps_.push_back(temps[i]); + } + if (cont->IsBranch()) { continuation_inputs_.push_back(g.Label(cont->true_block())); continuation_inputs_.push_back(g.Label(cont->false_block())); @@ -760,8 +774,10 @@ Instruction* InstructionSelector::EmitWithContinuation( size_t const emit_outputs_size = continuation_outputs_.size(); auto* emit_outputs = emit_outputs_size ? &continuation_outputs_.front() : nullptr; + size_t const emit_temps_size = continuation_temps_.size(); + auto* emit_temps = emit_temps_size ? 
&continuation_temps_.front() : nullptr; return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size, - emit_inputs, 0, nullptr); + emit_inputs, emit_temps_size, emit_temps); } void InstructionSelector::AppendDeoptimizeArguments( @@ -1736,6 +1752,8 @@ void InstructionSelector::VisitNode(Node* node) { return VisitStackSlot(node); case IrOpcode::kStackPointerGreaterThan: return VisitStackPointerGreaterThan(node); + case IrOpcode::kLoadStackCheckOffset: + return VisitLoadStackCheckOffset(node); case IrOpcode::kLoadFramePointer: return VisitLoadFramePointer(node); case IrOpcode::kLoadParentFramePointer: @@ -2214,6 +2232,11 @@ void InstructionSelector::VisitStackPointerGreaterThan(Node* node) { VisitStackPointerGreaterThan(node, &cont); } +void InstructionSelector::VisitLoadStackCheckOffset(Node* node) { + OperandGenerator g(this); + Emit(kArchStackCheckOffset, g.DefineAsRegister(node)); +} + void InstructionSelector::VisitLoadFramePointer(Node* node) { OperandGenerator g(this); Emit(kArchFramePointer, g.DefineAsRegister(node)); diff --git a/src/compiler/backend/instruction-selector.h b/src/compiler/backend/instruction-selector.h index 470fd52437..2798b9917b 100644 --- a/src/compiler/backend/instruction-selector.h +++ b/src/compiler/backend/instruction-selector.h @@ -346,6 +346,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final { size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont); + Instruction* EmitWithContinuation( + InstructionCode opcode, size_t output_count, InstructionOperand* outputs, + size_t input_count, InstructionOperand* inputs, size_t temp_count, + InstructionOperand* temps, FlagsContinuation* cont); // =========================================================================== // ===== Architecture-independent deoptimization exit emission methods. ====== @@ -765,6 +769,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final { ZoneVector instructions_; InstructionOperandVector continuation_inputs_; InstructionOperandVector continuation_outputs_; + InstructionOperandVector continuation_temps_; BoolVector defined_; BoolVector used_; IntVector effect_level_; diff --git a/src/compiler/backend/x64/code-generator-x64.cc b/src/compiler/backend/x64/code-generator-x64.cc index 1d436791f7..4dc1345d8f 100644 --- a/src/compiler/backend/x64/code-generator-x64.cc +++ b/src/compiler/backend/x64/code-generator-x64.cc @@ -1022,14 +1022,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction( } break; case kArchStackPointerGreaterThan: { + // Potentially apply an offset to the current stack pointer before the + // comparison to consider the size difference of an optimized frame versus + // the contained unoptimized frames. 
+ + Register lhs_register = rsp; + uint32_t offset; + + if (ShouldApplyOffsetToStackCheck(instr, &offset)) { + lhs_register = kScratchRegister; + __ leaq(lhs_register, Operand(rsp, static_cast(offset) * -1)); + } + constexpr size_t kValueIndex = 0; if (HasAddressingMode(instr)) { - __ cmpq(rsp, i.MemoryOperand(kValueIndex)); + __ cmpq(lhs_register, i.MemoryOperand(kValueIndex)); } else { - __ cmpq(rsp, i.InputRegister(kValueIndex)); + __ cmpq(lhs_register, i.InputRegister(kValueIndex)); } break; } + case kArchStackCheckOffset: + __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset())); + break; case kArchTruncateDoubleToI: { auto result = i.OutputRegister(); auto input = i.InputDoubleRegister(0); diff --git a/src/compiler/backend/x64/instruction-selector-x64.cc b/src/compiler/backend/x64/instruction-selector-x64.cc index ad7f53ac4f..b6e6b4979e 100644 --- a/src/compiler/backend/x64/instruction-selector-x64.cc +++ b/src/compiler/backend/x64/instruction-selector-x64.cc @@ -545,8 +545,9 @@ void InstructionSelector::VisitWord64Xor(Node* node) { void InstructionSelector::VisitStackPointerGreaterThan( Node* node, FlagsContinuation* cont) { - Node* const value = node->InputAt(0); - InstructionCode opcode = kArchStackPointerGreaterThan; + StackCheckKind kind = StackCheckKindOf(node->op()); + InstructionCode opcode = + kArchStackPointerGreaterThan | MiscField::encode(static_cast(kind)); int effect_level = GetEffectLevel(node); if (cont->IsBranch()) { @@ -555,6 +556,7 @@ void InstructionSelector::VisitStackPointerGreaterThan( } X64OperandGenerator g(this); + Node* const value = node->InputAt(0); if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) { DCHECK_EQ(IrOpcode::kLoad, value->opcode()); diff --git a/src/compiler/bytecode-graph-builder.cc b/src/compiler/bytecode-graph-builder.cc index e4d9d06d49..46fa4fb7c4 100644 --- a/src/compiler/bytecode-graph-builder.cc +++ b/src/compiler/bytecode-graph-builder.cc @@ -354,8 +354,9 @@ class BytecodeGraphBuilder { void set_currently_peeled_loop_offset(int offset) { currently_peeled_loop_offset_ = offset; } - bool skip_next_stack_check() const { return skip_next_stack_check_; } - void unset_skip_next_stack_check() { skip_next_stack_check_ = false; } + bool skip_first_stack_check() const { return skip_first_stack_check_; } + bool visited_first_stack_check() const { return visited_first_stack_check_; } + void set_visited_first_stack_check() { visited_first_stack_check_ = true; } int current_exception_handler() const { return current_exception_handler_; } void set_current_exception_handler(int index) { current_exception_handler_ = index; @@ -388,7 +389,9 @@ class BytecodeGraphBuilder { Environment* environment_; bool const osr_; int currently_peeled_loop_offset_; - bool skip_next_stack_check_; + + const bool skip_first_stack_check_; + bool visited_first_stack_check_ = false; // Merge environments are snapshots of the environment at points where the // control flow merges. 
This models a forward data flow propagation of all @@ -964,8 +967,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder( environment_(nullptr), osr_(!osr_offset.IsNone()), currently_peeled_loop_offset_(-1), - skip_next_stack_check_(flags & - BytecodeGraphBuilderFlag::kSkipFirstStackCheck), + skip_first_stack_check_(flags & + BytecodeGraphBuilderFlag::kSkipFirstStackCheck), merge_environments_(local_zone), generator_merge_environments_(local_zone), exception_handlers_(local_zone), @@ -1276,12 +1279,6 @@ void BytecodeGraphBuilder::VisitSingleBytecode() { if (environment() != nullptr) { BuildLoopHeaderEnvironment(current_offset); - if (skip_next_stack_check() && bytecode_iterator().current_bytecode() == - interpreter::Bytecode::kStackCheck) { - unset_skip_next_stack_check(); - return; - } - switch (bytecode_iterator().current_bytecode()) { #define BYTECODE_CASE(name, ...) \ case interpreter::Bytecode::k##name: \ @@ -3259,8 +3256,20 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() { } void BytecodeGraphBuilder::VisitStackCheck() { + // Note: The stack check kind is determined heuristically: we simply assume + // that the first seen stack check is at function-entry, and all other stack + // checks are at iteration-body. An alternative precise solution would be to + // parameterize the StackCheck bytecode; but this has the caveat of increased + // code size. + StackCheckKind kind = StackCheckKind::kJSIterationBody; + if (!visited_first_stack_check()) { + set_visited_first_stack_check(); + kind = StackCheckKind::kJSFunctionEntry; + if (skip_first_stack_check()) return; + } + PrepareEagerCheckpoint(); - Node* node = NewNode(javascript()->StackCheck()); + Node* node = NewNode(javascript()->StackCheck(kind)); environment()->RecordAfterState(node, Environment::kAttachFrameState); } diff --git a/src/compiler/globals.h b/src/compiler/globals.h new file mode 100644 index 0000000000..38566b1563 --- /dev/null +++ b/src/compiler/globals.h @@ -0,0 +1,43 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef V8_COMPILER_GLOBALS_H_ +#define V8_COMPILER_GLOBALS_H_ + +#include "src/common/globals.h" + +namespace v8 { +namespace internal { +namespace compiler { + +enum class StackCheckKind { + kJSFunctionEntry = 0, + kJSIterationBody, + kCodeStubAssembler, + kWasm, +}; + +inline std::ostream& operator<<(std::ostream& os, StackCheckKind kind) { + switch (kind) { + case StackCheckKind::kJSFunctionEntry: + return os << "JSFunctionEntry"; + case StackCheckKind::kJSIterationBody: + return os << "JSIterationBody"; + case StackCheckKind::kCodeStubAssembler: + return os << "CodeStubAssembler"; + case StackCheckKind::kWasm: + return os << "Wasm"; + } + UNREACHABLE(); +} + +inline size_t hash_value(StackCheckKind kind) { + return static_cast(kind); +} + +} // namespace compiler +} // namespace internal +} // namespace v8 + +#endif // V8_COMPILER_GLOBALS_H_ diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc index d216a6bde1..e565d9ee40 100644 --- a/src/compiler/js-generic-lowering.cc +++ b/src/compiler/js-generic-lowering.cc @@ -848,6 +848,15 @@ void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) { UNREACHABLE(); // Eliminated in typed lowering. 
} +namespace { + +StackCheckKind StackCheckKindOfJSStackCheck(const Operator* op) { DCHECK(op->opcode() == IrOpcode::kJSStackCheck); return OpParameter<StackCheckKind>(op); } + +} // namespace + void JSGenericLowering::LowerJSStackCheck(Node* node) { Node* effect = NodeProperties::GetEffectInput(node); Node* control = NodeProperties::GetControlInput(node); @@ -858,8 +867,9 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) { ExternalReference::address_of_jslimit(isolate())), jsgraph()->IntPtrConstant(0), effect, control); - Node* check = effect = - graph()->NewNode(machine()->StackPointerGreaterThan(), limit, effect); + StackCheckKind stack_check_kind = StackCheckKindOfJSStackCheck(node->op()); + Node* check = effect = graph()->NewNode( + machine()->StackPointerGreaterThan(stack_check_kind), limit, effect); Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control); @@ -895,8 +905,17 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) { } } - // Turn the stack check into a runtime call. - ReplaceWithRuntimeCall(node, Runtime::kStackGuard); + // Turn the stack check into a runtime call. At function entry, the runtime + // function takes an offset argument which is subtracted from the stack + // pointer prior to the stack check (i.e. the check is `sp - offset >= + // limit`). + if (stack_check_kind == StackCheckKind::kJSFunctionEntry) { + node->InsertInput(zone(), 0, + graph()->NewNode(machine()->LoadStackCheckOffset())); + ReplaceWithRuntimeCall(node, Runtime::kStackGuardWithGap); + } else { + ReplaceWithRuntimeCall(node, Runtime::kStackGuard); + } } void JSGenericLowering::LowerJSDebugger(Node* node) { diff --git a/src/compiler/js-operator.cc b/src/compiler/js-operator.cc index 5c885ded14..5738ff6992 100644 --- a/src/compiler/js-operator.cc +++ b/src/compiler/js-operator.cc @@ -692,7 +692,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) { V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \ V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1) \ V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \ - V(StackCheck, Operator::kNoWrite, 0, 0) \ V(Debugger, Operator::kNoProperties, 0, 0) \ V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \ V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \ @@ -1343,6 +1342,15 @@ const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback, parameters); // parameter } +const Operator* JSOperatorBuilder::StackCheck(StackCheckKind kind) { + return new (zone()) Operator1<StackCheckKind>( // -- + IrOpcode::kJSStackCheck, // opcode + Operator::kNoWrite, // properties + "JSStackCheck", // name + 0, 1, 1, 0, 1, 2, // counts + kind); // parameter +} + const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() { return new (zone()) Operator( // -- IrOpcode::kJSCreateEmptyLiteralObject, // opcode diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h index d6ebb46340..235a1d58ce 100644 --- a/src/compiler/js-operator.h +++ b/src/compiler/js-operator.h @@ -6,8 +6,8 @@ #define V8_COMPILER_JS_OPERATOR_H_ #include "src/base/compiler-specific.h" -#include "src/common/globals.h" #include "src/compiler/feedback-source.h" +#include "src/compiler/globals.h" #include "src/handles/maybe-handles.h" #include "src/objects/type-hints.h" #include "src/runtime/runtime.h" @@ -895,7 +895,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final const Operator* GeneratorRestoreRegister(int index); const Operator* GeneratorRestoreInputOrDebugPos(); - const Operator* StackCheck(); +
const Operator* StackCheck(StackCheckKind kind); const Operator* Debugger(); const Operator* FulfillPromise(); diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc index e49fa9f978..b58ecf0867 100644 --- a/src/compiler/machine-operator.cc +++ b/src/compiler/machine-operator.cc @@ -244,6 +244,7 @@ MachineType AtomicOpType(Operator const* op) { V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \ V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \ V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \ + V(LoadStackCheckOffset, Operator::kNoProperties, 0, 0, 1) \ V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \ V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \ V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \ @@ -898,12 +899,25 @@ struct MachineOperatorGlobalCache { }; UnsafePointerAddOperator kUnsafePointerAdd; - struct StackPointerGreaterThanOperator final : public Operator { - StackPointerGreaterThanOperator() - : Operator(IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable, - "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0) {} + struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> { + explicit StackPointerGreaterThanOperator(StackCheckKind kind) + : Operator1<StackCheckKind>( + IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable, + "StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {} }; - StackPointerGreaterThanOperator kStackPointerGreaterThan; +#define STACK_POINTER_GREATER_THAN(Kind) \ + struct StackPointerGreaterThan##Kind##Operator final \ + : public StackPointerGreaterThanOperator { \ + StackPointerGreaterThan##Kind##Operator() \ + : StackPointerGreaterThanOperator(StackCheckKind::k##Kind) {} \ + }; \ + StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind; + + STACK_POINTER_GREATER_THAN(JSFunctionEntry) + STACK_POINTER_GREATER_THAN(JSIterationBody) + STACK_POINTER_GREATER_THAN(CodeStubAssembler) + STACK_POINTER_GREATER_THAN(Wasm) +#undef STACK_POINTER_GREATER_THAN }; struct CommentOperator : public Operator1<const char*> { @@ -1070,8 +1084,19 @@ const Operator* MachineOperatorBuilder::UnsafePointerAdd() { return &cache_.kUnsafePointerAdd; } -const Operator* MachineOperatorBuilder::StackPointerGreaterThan() { - return &cache_.kStackPointerGreaterThan; +const Operator* MachineOperatorBuilder::StackPointerGreaterThan( + StackCheckKind kind) { + switch (kind) { + case StackCheckKind::kJSFunctionEntry: + return &cache_.kStackPointerGreaterThanJSFunctionEntry; + case StackCheckKind::kJSIterationBody: + return &cache_.kStackPointerGreaterThanJSIterationBody; + case StackCheckKind::kCodeStubAssembler: + return &cache_.kStackPointerGreaterThanCodeStubAssembler; + case StackCheckKind::kWasm: + return &cache_.kStackPointerGreaterThanWasm; + } + UNREACHABLE(); } const Operator* MachineOperatorBuilder::BitcastWordToTagged() { @@ -1376,6 +1401,11 @@ const uint8_t* S8x16ShuffleOf(Operator const* op) { return OpParameter<uint8_t*>(op); } +StackCheckKind StackCheckKindOf(Operator const* op) { + DCHECK_EQ(IrOpcode::kStackPointerGreaterThan, op->opcode()); + return OpParameter<StackCheckKind>(op); +} + #undef PURE_BINARY_OP_LIST_32 #undef PURE_BINARY_OP_LIST_64 #undef MACHINE_PURE_OP_LIST diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h index 7c73a50c28..3e73b4f7cb 100644 --- a/src/compiler/machine-operator.h +++ b/src/compiler/machine-operator.h @@ -9,6 +9,7 @@ #include "src/base/enum-set.h" #include "src/base/flags.h" #include "src/codegen/machine-type.h" +#include "src/compiler/globals.h" #include
"src/compiler/write-barrier-kind.h" #include "src/zone/zone.h" @@ -115,6 +116,8 @@ MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT; V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op) V8_WARN_UNUSED_RESULT; +StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT; + // Interface for building machine-level operators. These operators are // machine-level but machine-independent and thus define a language suitable // for generating code to run on architectures such as ia32, x64, arm, etc. @@ -687,8 +690,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final const Operator* LoadFramePointer(); const Operator* LoadParentFramePointer(); - // Compares: stack_pointer > value. - const Operator* StackPointerGreaterThan(); + // Compares: stack_pointer [- offset] > value. The offset is optionally + // applied for kFunctionEntry stack checks. + const Operator* StackPointerGreaterThan(StackCheckKind kind); + + // Loads the offset that should be applied to the current stack + // pointer before a stack check. Used as input to the + // Runtime::kStackGuardWithGap call. + const Operator* LoadStackCheckOffset(); // Memory barrier. const Operator* MemBarrier(); diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h index 3bba7d8d5a..52c253d405 100644 --- a/src/compiler/opcodes.h +++ b/src/compiler/opcodes.h @@ -729,6 +729,7 @@ V(TaggedPoisonOnSpeculation) \ V(Word32PoisonOnSpeculation) \ V(Word64PoisonOnSpeculation) \ + V(LoadStackCheckOffset) \ V(LoadFramePointer) \ V(LoadParentFramePointer) \ V(UnalignedLoad) \ diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h index c0bfd84a61..5c3f3f64b9 100644 --- a/src/compiler/raw-machine-assembler.h +++ b/src/compiler/raw-machine-assembler.h @@ -579,7 +579,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler { return AddNode(machine()->Word32PairSar(), low_word, high_word, shift); } Node* StackPointerGreaterThan(Node* value) { - return AddNode(machine()->StackPointerGreaterThan(), value); + return AddNode( + machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler), + value); } #define INTPTR_BINOP(prefix, name) \ diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc index 032b3d2ff3..0fe4c3b752 100644 --- a/src/compiler/verifier.cc +++ b/src/compiler/verifier.cc @@ -1856,6 +1856,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) { case IrOpcode::kTaggedPoisonOnSpeculation: case IrOpcode::kWord32PoisonOnSpeculation: case IrOpcode::kWord64PoisonOnSpeculation: + case IrOpcode::kLoadStackCheckOffset: case IrOpcode::kLoadFramePointer: case IrOpcode::kLoadParentFramePointer: case IrOpcode::kUnalignedLoad: diff --git a/src/compiler/wasm-compiler.cc b/src/compiler/wasm-compiler.cc index 272d188b00..d15095beea 100644 --- a/src/compiler/wasm-compiler.cc +++ b/src/compiler/wasm-compiler.cc @@ -329,7 +329,8 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position, *effect = limit; Node* check = graph()->NewNode( - mcgraph()->machine()->StackPointerGreaterThan(), limit, *effect); + mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm), + limit, *effect); *effect = check; Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue); diff --git a/src/deoptimizer/deoptimizer.cc b/src/deoptimizer/deoptimizer.cc index fcb4c27d0b..5d11eacef1 100644 --- a/src/deoptimizer/deoptimizer.cc +++ b/src/deoptimizer/deoptimizer.cc @@ -698,6 +698,10 @@ void Deoptimizer::DoComputeOutputFrames() { } } + StackGuard* const 
stack_guard = isolate()->stack_guard(); + CHECK_GT(static_cast<uintptr_t>(caller_frame_top_), + stack_guard->real_jslimit()); + if (trace_scope_ != nullptr) { timer.Start(); PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ", @@ -755,6 +759,7 @@ void Deoptimizer::DoComputeOutputFrames() { // Translate each output frame. int frame_index = 0; // output_frame_index + size_t total_output_frame_size = 0; for (size_t i = 0; i < count; ++i, ++frame_index) { // Read the ast node id, function, and frame height for this output frame. TranslatedFrame* translated_frame = &(translated_state_.frames()[i]); @@ -790,6 +795,7 @@ FATAL("invalid frame"); break; } + total_output_frame_size += output_[frame_index]->GetFrameSize(); } FrameDescription* topmost = output_[count - 1]; @@ -809,6 +815,15 @@ bailout_id_, node_id.ToInt(), output_[index]->GetPc(), caller_frame_top_, ms); } + + // The following invariant is fairly tricky to guarantee, since the size of + // an optimized frame and its deoptimized counterparts usually differs. We + // thus need to consider the case in which deoptimized frames are larger than + // the optimized frame in stack checks in optimized code. We do this by + // applying an offset to stack checks (see kArchStackPointerGreaterThan in the + // code generator). + CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size, + stack_guard->real_jslimit()); } void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame, diff --git a/src/runtime/runtime-internal.cc b/src/runtime/runtime-internal.cc index 03c9e582d8..4085aad7c1 100644 --- a/src/runtime/runtime-internal.cc +++ b/src/runtime/runtime-internal.cc @@ -284,6 +284,21 @@ RUNTIME_FUNCTION(Runtime_StackGuard) { return isolate->stack_guard()->HandleInterrupts(); } +RUNTIME_FUNCTION(Runtime_StackGuardWithGap) { + SealHandleScope shs(isolate); + DCHECK_EQ(args.length(), 1); + CONVERT_UINT32_ARG_CHECKED(gap, 0); + TRACE_EVENT0("v8.execute", "V8.StackGuard"); + + // First check if this is a real stack overflow. + StackLimitCheck check(isolate); + if (check.JsHasOverflowed(gap)) { + return isolate->StackOverflow(); + } + + return isolate->stack_guard()->HandleInterrupts(); +} + RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) { HandleScope scope(isolate); DCHECK_EQ(1, args.length()); diff --git a/src/runtime/runtime.h b/src/runtime/runtime.h index c11a087654..71fbd4f98c 100644 --- a/src/runtime/runtime.h +++ b/src/runtime/runtime.h @@ -226,6 +226,7 @@ namespace internal { F(RunMicrotaskCallback, 2, 1) \ F(PerformMicrotaskCheckpoint, 0, 1) \ F(StackGuard, 0, 1) \ + F(StackGuardWithGap, 1, 1) \ F(Throw, 1, 1) \ F(ThrowApplyNonFunction, 1, 1) \ F(ThrowCalledNonCallable, 1, 1) \ diff --git a/test/mjsunit/regress/regress-v8-9534.js b/test/mjsunit/regress/regress-v8-9534.js new file mode 100644 index 0000000000..430a78bff6 --- /dev/null +++ b/test/mjsunit/regress/regress-v8-9534.js @@ -0,0 +1,18 @@ +// Copyright 2019 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
+ +// Flags: --allow-natives-syntax --stack-size=50 + +let i = 0; +function f() { + i++; + if (i > 10) { + %PrepareFunctionForOptimization(f); + %OptimizeFunctionOnNextCall(f); + } + + new Promise(f); + return f.x; +} +f(); diff --git a/test/unittests/compiler/machine-operator-unittest.cc b/test/unittests/compiler/machine-operator-unittest.cc index d0acbf341c..e53050ad55 100644 --- a/test/unittests/compiler/machine-operator-unittest.cc +++ b/test/unittests/compiler/machine-operator-unittest.cc @@ -245,7 +245,6 @@ const PureOperator kPureOperators[] = { PURE(Float64Equal, 2, 0, 1), // -- PURE(Float64LessThan, 2, 0, 1), // -- PURE(Float64LessThanOrEqual, 2, 0, 1), // -- - PURE(StackPointerGreaterThan, 1, 0, 1), // -- PURE(Float64ExtractLowWord32, 1, 0, 1), // -- PURE(Float64ExtractHighWord32, 1, 0, 1), // -- PURE(Float64InsertLowWord32, 2, 0, 1), // --
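
Illustration (not part of the patch): the standalone C++ sketch below models the two pieces the diff adds, the offset computation in CodeGenerator::GetStackCheckOffset() and the adjusted kArchStackPointerGreaterThan comparison, which for a kJSFunctionEntry check effectively becomes `sp - offset > limit`. The frame sizes, the limit, and the stack-pointer values are made-up inputs chosen for the example; in V8 they come from the Frame, from max_unoptimized_frame_height_, and from the isolate's real_jslimit().

#include <algorithm>
#include <cstdint>
#include <iostream>

// Stand-in for V8's kSystemPointerSize on a 64-bit target (assumption).
constexpr int32_t kSystemPointerSize = 8;

// Mirrors GetStackCheckOffset(): the offset is the amount by which the
// (possibly larger) unoptimized frames may exceed the optimized frame.
uint32_t GetStackCheckOffset(int32_t optimized_frame_slots,
                             int32_t max_unoptimized_frame_height) {
  int32_t optimized_frame_height = optimized_frame_slots * kSystemPointerSize;
  int32_t signed_offset =
      std::max(max_unoptimized_frame_height - optimized_frame_height, 0);
  return static_cast<uint32_t>(signed_offset);
}

// Mirrors the adjusted comparison emitted for a function-entry stack check:
// if it passes, a later deopt that materializes larger unoptimized frames
// cannot push the stack below the real limit.
bool StackCheckPasses(uintptr_t sp, uintptr_t real_jslimit, uint32_t offset) {
  return (sp - offset) > real_jslimit;
}

int main() {
  // Hypothetical numbers: a 40-slot optimized frame whose unoptimized
  // counterparts need 1000 bytes in total, so offset = 1000 - 320 = 680.
  const uint32_t offset = GetStackCheckOffset(40, 1000);
  const uintptr_t limit = 0x10000;
  std::cout << "offset = " << offset << "\n";
  std::cout << StackCheckPasses(0x10800, limit, offset) << "\n";  // 1: passes
  std::cout << StackCheckPasses(0x10200, limit, offset) << "\n";  // 0: fails
  return 0;
}

In the patch itself this is what the per-architecture sub/lea sequences guarded by ShouldApplyOffsetToStackCheck() implement, and the slow path taken on failure calls Runtime::kStackGuardWithGap with the same offset so the runtime repeats the check against the real limit before handling interrupts.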