Reland "[compiler] Optionally apply an offset to stack checks"

This is a reland of 4a16305b65

The original CL adjusted only one part of the stack check, namely the
comparison of the stack pointer against the stack limit in generated code.
There is a second part: Runtime::kStackGuard repeats this check to
distinguish between a stack overflow and an interrupt request.

This second part, in the runtime, must apply the same offset as the generated
code. This reland implements that through the StackCheckOffset operator
and a new StackGuardWithGap runtime function.
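
A minimal sketch of the two halves of the check (hypothetical helper names,
not the actual V8 code):

    #include <cstdint>

    // Generated code at JS function entry compares an adjusted stack pointer:
    //   if (!(sp - offset > limit)) CallRuntime(kStackGuardWithGap, offset);
    //
    // The runtime repeats the comparison with the same gap, so a frame that
    // is merely larger than the optimized frame is reported as a stack
    // overflow rather than treated as an interrupt request.
    bool HasOverflowedWithGap(uintptr_t sp, uintptr_t limit, uint32_t gap) {
      return sp - gap <= limit;
    }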

Original change's description:
> [compiler] Optionally apply an offset to stack checks
>
> The motivation behind this change is that the frame size of an optimized
> function and its unoptimized version may differ, and deoptimization
> may thus trigger a stack overflow. The solution implemented in this CL
> is to optionally apply an offset to the stack check s.t. the check
> becomes 'sp - offset > limit'. The offset is applied to stack checks at
> function-entry, and is set to the difference between the optimized and
> unoptimized frame size.
>
> A caveat: OSR may not be fully handled by this fix since we've already
> passed the function-entry stack check. A possible solution would be to
> *not* skip creation of function-entry stack checks for inlinees.
>
> This CL: 1. annotates stack check nodes with the stack check kind, where
> kind is one of {function-entry,iteration-body,unknown}. 2. potentially
> allocates a temporary register to store the result of the 'sp - offset'
> in instruction selection (and switches input registers to 'unique'
> mode). 3. Applies the offset in code generation.
>
> Drive-by: Add src/compiler/globals.h for compiler-specific globals.
>
> Bug: v8:9534,chromium:1000887
> Change-Id: I257191c4a4978ccb60cfa5805ef421f30f0e9826
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1762521
> Commit-Queue: Jakob Gruber <jgruber@chromium.org>
> Reviewed-by: Georg Neis <neis@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#63701}
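
As a concrete illustration of the offset described in the original change
above (the numbers are made up): if the optimized frame needs 48 bytes but
its unoptimized counterparts may need up to 176 bytes after deoptimization,
the function-entry check uses

    offset = max(176 - 48, 0) = 128   // max unoptimized height minus optimized height, clamped at 0

and is emitted as 'sp - 128 > limit', leaving room for the deoptimizer to
materialize the larger frames.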

Bug: v8:9534, chromium:1000887
Change-Id: I71771c281afd7d57c09aa48ea1b182d01e6dee2a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1822037
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#64634}
Jakob Gruber 2019-10-30 09:43:29 +01:00 committed by Commit Bot
parent 6a18b44b0d
commit b875f4661a
32 changed files with 437 additions and 66 deletions


@ -1723,6 +1723,7 @@ v8_compiler_sources = [
"src/compiler/frame.cc",
"src/compiler/frame.h",
"src/compiler/functional-list.h",
"src/compiler/globals.h",
"src/compiler/graph-assembler.cc",
"src/compiler/graph-assembler.h",
"src/compiler/graph-reducer.cc",


@ -937,11 +937,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchStackPointerGreaterThan: {
// Potentially apply an offset to the current stack pointer before the
// comparison to consider the size difference of an optimized frame versus
// the contained unoptimized frames.
Register lhs_register = sp;
uint32_t offset;
if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
lhs_register = i.TempRegister(0);
__ sub(lhs_register, sp, Operand(offset));
}
constexpr size_t kValueIndex = 0;
DCHECK(instr->InputAt(kValueIndex)->IsRegister());
__ cmp(sp, i.InputRegister(kValueIndex));
__ cmp(lhs_register, i.InputRegister(kValueIndex));
break;
}
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());


@ -909,11 +909,31 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
void InstructionSelector::VisitStackPointerGreaterThan(
Node* node, FlagsContinuation* cont) {
Node* const value = node->InputAt(0);
InstructionCode opcode = kArchStackPointerGreaterThan;
StackCheckKind kind = StackCheckKindOf(node->op());
InstructionCode opcode =
kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
ArmOperandGenerator g(this);
EmitWithContinuation(opcode, g.UseRegister(value), cont);
// No outputs.
InstructionOperand* const outputs = nullptr;
const int output_count = 0;
// Applying an offset to this stack check requires a temp register. Offsets
// are only applied to the first stack check. If applying an offset, we must
// ensure the input and temp registers do not alias, thus kUniqueRegister.
InstructionOperand temps[] = {g.TempRegister()};
const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
? OperandGenerator::kUniqueRegister
: OperandGenerator::kRegister;
Node* const value = node->InputAt(0);
InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
static constexpr int input_count = arraysize(inputs);
EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
temp_count, temps, cont);
}
namespace {


@ -869,11 +869,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchStackPointerGreaterThan: {
// Potentially apply an offset to the current stack pointer before the
// comparison to consider the size difference of an optimized frame versus
// the contained unoptimized frames.
Register lhs_register = sp;
uint32_t offset;
if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
lhs_register = i.TempRegister(0);
__ Sub(lhs_register, sp, offset);
}
constexpr size_t kValueIndex = 0;
DCHECK(instr->InputAt(kValueIndex)->IsRegister());
__ Cmp(sp, i.InputRegister(kValueIndex));
__ Cmp(lhs_register, i.InputRegister(kValueIndex));
break;
}
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());


@ -1028,11 +1028,31 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
void InstructionSelector::VisitStackPointerGreaterThan(
Node* node, FlagsContinuation* cont) {
Node* const value = node->InputAt(0);
InstructionCode opcode = kArchStackPointerGreaterThan;
StackCheckKind kind = StackCheckKindOf(node->op());
InstructionCode opcode =
kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
Arm64OperandGenerator g(this);
EmitWithContinuation(opcode, g.UseRegister(value), cont);
// No outputs.
InstructionOperand* const outputs = nullptr;
const int output_count = 0;
// Applying an offset to this stack check requires a temp register. Offsets
// are only applied to the first stack check. If applying an offset, we must
// ensure the input and temp registers do not alias, thus kUniqueRegister.
InstructionOperand temps[] = {g.TempRegister()};
const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
? OperandGenerator::kUniqueRegister
: OperandGenerator::kRegister;
Node* const value = node->InputAt(0);
InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
static constexpr int input_count = arraysize(inputs);
EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
temp_count, temps, cont);
}
namespace {


@ -10,6 +10,7 @@
#include "src/codegen/optimized-compilation-info.h"
#include "src/codegen/string-constants.h"
#include "src/compiler/backend/code-generator-impl.h"
#include "src/compiler/globals.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
@ -115,6 +116,32 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
frame_access_state_ = new (zone()) FrameAccessState(frame);
}
bool CodeGenerator::ShouldApplyOffsetToStackCheck(Instruction* instr,
uint32_t* offset) {
DCHECK_EQ(instr->arch_opcode(), kArchStackPointerGreaterThan);
StackCheckKind kind =
static_cast<StackCheckKind>(MiscField::decode(instr->opcode()));
if (kind != StackCheckKind::kJSFunctionEntry) return false;
uint32_t stack_check_offset = *offset = GetStackCheckOffset();
return stack_check_offset > 0;
}
uint32_t CodeGenerator::GetStackCheckOffset() {
if (!frame_access_state()->has_frame()) return 0;
int32_t optimized_frame_height =
frame()->GetTotalFrameSlotCount() * kSystemPointerSize;
DCHECK(is_int32(max_unoptimized_frame_height_));
int32_t signed_max_unoptimized_frame_height =
static_cast<int32_t>(max_unoptimized_frame_height_);
int32_t signed_offset =
std::max(signed_max_unoptimized_frame_height - optimized_frame_height, 0);
return (signed_offset <= 0) ? 0 : static_cast<uint32_t>(signed_offset);
}
CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
DeoptimizationExit* exit) {
int deoptimization_id = exit->deoptimization_id();


@ -171,6 +171,13 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
static constexpr int kBinarySearchSwitchMinimalCases = 4;
// Returns true if an offset should be applied to the given stack check. This
// is the case for stack checks on function-entry when the offset is non-zero,
// where the offset is the difference between the size of optimized and
// corresponding deoptimized frames.
bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
uint32_t GetStackCheckOffset();
private:
GapResolver* resolver() { return &resolver_; }
SafepointTableBuilder* safepoints() { return &safepoints_; }


@ -924,14 +924,28 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchStackPointerGreaterThan: {
// Potentially apply an offset to the current stack pointer before the
// comparison to consider the size difference of an optimized frame versus
// the contained unoptimized frames.
Register lhs_register = esp;
uint32_t offset;
if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
lhs_register = i.TempRegister(0);
__ lea(lhs_register, Operand(esp, -1 * static_cast<int32_t>(offset)));
}
constexpr size_t kValueIndex = 0;
if (HasAddressingMode(instr)) {
__ cmp(esp, i.MemoryOperand(kValueIndex));
__ cmp(lhs_register, i.MemoryOperand(kValueIndex));
} else {
__ cmp(esp, i.InputRegister(kValueIndex));
__ cmp(lhs_register, i.InputRegister(kValueIndex));
}
break;
}
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);


@ -86,11 +86,10 @@ class IA32OperandGenerator final : public OperandGenerator {
}
}
AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
Node* displacement_node,
DisplacementMode displacement_mode,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode GenerateMemoryOperandInputs(
Node* index, int scale, Node* base, Node* displacement_node,
DisplacementMode displacement_mode, InstructionOperand inputs[],
size_t* input_count, RegisterMode register_mode = kRegister) {
AddressingMode mode = kMode_MRI;
int32_t displacement = (displacement_node == nullptr)
? 0
@ -105,10 +104,10 @@ class IA32OperandGenerator final : public OperandGenerator {
}
}
if (base != nullptr) {
inputs[(*input_count)++] = UseRegister(base);
inputs[(*input_count)++] = UseRegisterWithMode(base, register_mode);
if (index != nullptr) {
DCHECK(scale >= 0 && scale <= 3);
inputs[(*input_count)++] = UseRegister(index);
inputs[(*input_count)++] = UseRegisterWithMode(index, register_mode);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
@ -130,7 +129,7 @@ class IA32OperandGenerator final : public OperandGenerator {
} else {
DCHECK(scale >= 0 && scale <= 3);
if (index != nullptr) {
inputs[(*input_count)++] = UseRegister(index);
inputs[(*input_count)++] = UseRegisterWithMode(index, register_mode);
if (displacement != 0) {
inputs[(*input_count)++] = TempImmediate(displacement);
static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
@ -149,9 +148,9 @@ class IA32OperandGenerator final : public OperandGenerator {
return mode;
}
AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
InstructionOperand inputs[],
size_t* input_count) {
AddressingMode GetEffectiveAddressMemoryOperand(
Node* node, InstructionOperand inputs[], size_t* input_count,
RegisterMode register_mode = kRegister) {
{
LoadMatcher<ExternalReferenceMatcher> m(node);
if (m.index().HasValue() && m.object().HasValue() &&
@ -172,10 +171,12 @@ class IA32OperandGenerator final : public OperandGenerator {
if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
return GenerateMemoryOperandInputs(
m.index(), m.scale(), m.base(), m.displacement(),
m.displacement_mode(), inputs, input_count);
m.displacement_mode(), inputs, input_count, register_mode);
} else {
inputs[(*input_count)++] = UseRegister(node->InputAt(0));
inputs[(*input_count)++] = UseRegister(node->InputAt(1));
inputs[(*input_count)++] =
UseRegisterWithMode(node->InputAt(0), register_mode);
inputs[(*input_count)++] =
UseRegisterWithMode(node->InputAt(1), register_mode);
return kMode_MR1;
}
}
@ -577,8 +578,9 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
void InstructionSelector::VisitStackPointerGreaterThan(
Node* node, FlagsContinuation* cont) {
Node* const value = node->InputAt(0);
InstructionCode opcode = kArchStackPointerGreaterThan;
StackCheckKind kind = StackCheckKindOf(node->op());
InstructionCode opcode =
kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
int effect_level = GetEffectLevel(node);
if (cont->IsBranch()) {
@ -587,6 +589,21 @@ void InstructionSelector::VisitStackPointerGreaterThan(
}
IA32OperandGenerator g(this);
// No outputs.
InstructionOperand* const outputs = nullptr;
const int output_count = 0;
// Applying an offset to this stack check requires a temp register. Offsets
// are only applied to the first stack check. If applying an offset, we must
// ensure the input and temp registers do not alias, thus kUniqueRegister.
InstructionOperand temps[] = {g.TempRegister()};
const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
? OperandGenerator::kUniqueRegister
: OperandGenerator::kRegister;
Node* const value = node->InputAt(0);
if (g.CanBeMemoryOperand(kIA32Cmp, node, value, effect_level)) {
DCHECK_EQ(IrOpcode::kLoad, value->opcode());
@ -595,14 +612,18 @@ void InstructionSelector::VisitStackPointerGreaterThan(
size_t input_count = 0;
InstructionOperand inputs[kMaxInputCount];
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
value, inputs, &input_count, register_mode);
opcode |= AddressingModeField::encode(addressing_mode);
DCHECK_LE(input_count, kMaxInputCount);
EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
temp_count, temps, cont);
} else {
EmitWithContinuation(opcode, g.UseRegister(value), cont);
InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
static constexpr int input_count = arraysize(inputs);
EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
temp_count, temps, cont);
}
}


@ -95,6 +95,7 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchStackSlot) \
V(ArchWordPoisonOnSpeculation) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
V(Word32AtomicLoadInt8) \
V(Word32AtomicLoadUint8) \
V(Word32AtomicLoadInt16) \


@ -248,6 +248,7 @@ void InstructionScheduler::Schedule() {
int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
switch (instr->arch_opcode()) {
case kArchNop:
case kArchStackCheckOffset:
case kArchFramePointer:
case kArchParentFramePointer:
case kArchStackSlot: // Despite its name this opcode will produce a


@ -241,6 +241,19 @@ class OperandGenerator {
UnallocatedOperand::USED_AT_START, vreg);
}
// The kind of register generated for memory operands. kRegister is alive
// until the start of the operation, kUniqueRegister until the end.
enum RegisterMode {
kRegister,
kUniqueRegister,
};
InstructionOperand UseRegisterWithMode(Node* node,
RegisterMode register_mode) {
return register_mode == kRegister ? UseRegister(node)
: UseUniqueRegister(node);
}
InstructionOperand TempDoubleRegister() {
UnallocatedOperand op = UnallocatedOperand(
UnallocatedOperand::MUST_HAVE_REGISTER,


@ -42,6 +42,7 @@ InstructionSelector::InstructionSelector(
instructions_(zone),
continuation_inputs_(sequence->zone()),
continuation_outputs_(sequence->zone()),
continuation_temps_(sequence->zone()),
defined_(node_count, false, zone),
used_(node_count, false, zone),
effect_level_(node_count, 0, zone),
@ -723,6 +724,14 @@ Instruction* InstructionSelector::EmitWithContinuation(
Instruction* InstructionSelector::EmitWithContinuation(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
return EmitWithContinuation(opcode, output_count, outputs, input_count,
inputs, 0, nullptr, cont);
}
Instruction* InstructionSelector::EmitWithContinuation(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, size_t temp_count,
InstructionOperand* temps, FlagsContinuation* cont) {
OperandGenerator g(this);
opcode = cont->Encode(opcode);
@ -737,6 +746,11 @@ Instruction* InstructionSelector::EmitWithContinuation(
continuation_outputs_.push_back(outputs[i]);
}
continuation_temps_.resize(0);
for (size_t i = 0; i < temp_count; i++) {
continuation_temps_.push_back(temps[i]);
}
if (cont->IsBranch()) {
continuation_inputs_.push_back(g.Label(cont->true_block()));
continuation_inputs_.push_back(g.Label(cont->false_block()));
@ -760,8 +774,10 @@ Instruction* InstructionSelector::EmitWithContinuation(
size_t const emit_outputs_size = continuation_outputs_.size();
auto* emit_outputs =
emit_outputs_size ? &continuation_outputs_.front() : nullptr;
size_t const emit_temps_size = continuation_temps_.size();
auto* emit_temps = emit_temps_size ? &continuation_temps_.front() : nullptr;
return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
emit_inputs, 0, nullptr);
emit_inputs, emit_temps_size, emit_temps);
}
void InstructionSelector::AppendDeoptimizeArguments(
@ -1736,6 +1752,8 @@ void InstructionSelector::VisitNode(Node* node) {
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
return VisitStackPointerGreaterThan(node);
case IrOpcode::kLoadStackCheckOffset:
return VisitLoadStackCheckOffset(node);
case IrOpcode::kLoadFramePointer:
return VisitLoadFramePointer(node);
case IrOpcode::kLoadParentFramePointer:
@ -2214,6 +2232,11 @@ void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
VisitStackPointerGreaterThan(node, &cont);
}
void InstructionSelector::VisitLoadStackCheckOffset(Node* node) {
OperandGenerator g(this);
Emit(kArchStackCheckOffset, g.DefineAsRegister(node));
}
void InstructionSelector::VisitLoadFramePointer(Node* node) {
OperandGenerator g(this);
Emit(kArchFramePointer, g.DefineAsRegister(node));


@ -346,6 +346,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
size_t input_count,
InstructionOperand* inputs,
FlagsContinuation* cont);
Instruction* EmitWithContinuation(
InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
size_t input_count, InstructionOperand* inputs, size_t temp_count,
InstructionOperand* temps, FlagsContinuation* cont);
// ===========================================================================
// ===== Architecture-independent deoptimization exit emission methods. ======
@ -765,6 +769,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
ZoneVector<Instruction*> instructions_;
InstructionOperandVector continuation_inputs_;
InstructionOperandVector continuation_outputs_;
InstructionOperandVector continuation_temps_;
BoolVector defined_;
BoolVector used_;
IntVector effect_level_;


@ -1022,14 +1022,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
case kArchStackPointerGreaterThan: {
// Potentially apply an offset to the current stack pointer before the
// comparison to consider the size difference of an optimized frame versus
// the contained unoptimized frames.
Register lhs_register = rsp;
uint32_t offset;
if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
lhs_register = kScratchRegister;
__ leaq(lhs_register, Operand(rsp, static_cast<int32_t>(offset) * -1));
}
constexpr size_t kValueIndex = 0;
if (HasAddressingMode(instr)) {
__ cmpq(rsp, i.MemoryOperand(kValueIndex));
__ cmpq(lhs_register, i.MemoryOperand(kValueIndex));
} else {
__ cmpq(rsp, i.InputRegister(kValueIndex));
__ cmpq(lhs_register, i.InputRegister(kValueIndex));
}
break;
}
case kArchStackCheckOffset:
__ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);


@ -545,8 +545,9 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
void InstructionSelector::VisitStackPointerGreaterThan(
Node* node, FlagsContinuation* cont) {
Node* const value = node->InputAt(0);
InstructionCode opcode = kArchStackPointerGreaterThan;
StackCheckKind kind = StackCheckKindOf(node->op());
InstructionCode opcode =
kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
int effect_level = GetEffectLevel(node);
if (cont->IsBranch()) {
@ -555,6 +556,7 @@ void InstructionSelector::VisitStackPointerGreaterThan(
}
X64OperandGenerator g(this);
Node* const value = node->InputAt(0);
if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) {
DCHECK_EQ(IrOpcode::kLoad, value->opcode());


@ -354,8 +354,9 @@ class BytecodeGraphBuilder {
void set_currently_peeled_loop_offset(int offset) {
currently_peeled_loop_offset_ = offset;
}
bool skip_next_stack_check() const { return skip_next_stack_check_; }
void unset_skip_next_stack_check() { skip_next_stack_check_ = false; }
bool skip_first_stack_check() const { return skip_first_stack_check_; }
bool visited_first_stack_check() const { return visited_first_stack_check_; }
void set_visited_first_stack_check() { visited_first_stack_check_ = true; }
int current_exception_handler() const { return current_exception_handler_; }
void set_current_exception_handler(int index) {
current_exception_handler_ = index;
@ -388,7 +389,9 @@ class BytecodeGraphBuilder {
Environment* environment_;
bool const osr_;
int currently_peeled_loop_offset_;
bool skip_next_stack_check_;
const bool skip_first_stack_check_;
bool visited_first_stack_check_ = false;
// Merge environments are snapshots of the environment at points where the
// control flow merges. This models a forward data flow propagation of all
@ -964,8 +967,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
environment_(nullptr),
osr_(!osr_offset.IsNone()),
currently_peeled_loop_offset_(-1),
skip_next_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
skip_first_stack_check_(flags &
BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
merge_environments_(local_zone),
generator_merge_environments_(local_zone),
exception_handlers_(local_zone),
@ -1276,12 +1279,6 @@ void BytecodeGraphBuilder::VisitSingleBytecode() {
if (environment() != nullptr) {
BuildLoopHeaderEnvironment(current_offset);
if (skip_next_stack_check() && bytecode_iterator().current_bytecode() ==
interpreter::Bytecode::kStackCheck) {
unset_skip_next_stack_check();
return;
}
switch (bytecode_iterator().current_bytecode()) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
@ -3259,8 +3256,20 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
}
void BytecodeGraphBuilder::VisitStackCheck() {
// Note: The stack check kind is determined heuristically: we simply assume
// that the first seen stack check is at function-entry, and all other stack
// checks are at iteration-body. An alternative precise solution would be to
// parameterize the StackCheck bytecode; but this has the caveat of increased
// code size.
StackCheckKind kind = StackCheckKind::kJSIterationBody;
if (!visited_first_stack_check()) {
set_visited_first_stack_check();
kind = StackCheckKind::kJSFunctionEntry;
if (skip_first_stack_check()) return;
}
PrepareEagerCheckpoint();
Node* node = NewNode(javascript()->StackCheck());
Node* node = NewNode(javascript()->StackCheck(kind));
environment()->RecordAfterState(node, Environment::kAttachFrameState);
}

src/compiler/globals.h (new file, 43 lines)

@ -0,0 +1,43 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_GLOBALS_H_
#define V8_COMPILER_GLOBALS_H_
#include "src/common/globals.h"
namespace v8 {
namespace internal {
namespace compiler {
enum class StackCheckKind {
kJSFunctionEntry = 0,
kJSIterationBody,
kCodeStubAssembler,
kWasm,
};
inline std::ostream& operator<<(std::ostream& os, StackCheckKind kind) {
switch (kind) {
case StackCheckKind::kJSFunctionEntry:
return os << "JSFunctionEntry";
case StackCheckKind::kJSIterationBody:
return os << "JSIterationBody";
case StackCheckKind::kCodeStubAssembler:
return os << "CodeStubAssembler";
case StackCheckKind::kWasm:
return os << "Wasm";
}
UNREACHABLE();
}
inline size_t hash_value(StackCheckKind kind) {
return static_cast<size_t>(kind);
}
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_GLOBALS_H_


@ -848,6 +848,15 @@ void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
UNREACHABLE(); // Eliminated in typed lowering.
}
namespace {
StackCheckKind StackCheckKindOfJSStackCheck(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kJSStackCheck);
return OpParameter<StackCheckKind>(op);
}
} // namespace
void JSGenericLowering::LowerJSStackCheck(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
@ -858,8 +867,9 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
ExternalReference::address_of_jslimit(isolate())),
jsgraph()->IntPtrConstant(0), effect, control);
Node* check = effect =
graph()->NewNode(machine()->StackPointerGreaterThan(), limit, effect);
StackCheckKind stack_check_kind = StackCheckKindOfJSStackCheck(node->op());
Node* check = effect = graph()->NewNode(
machine()->StackPointerGreaterThan(stack_check_kind), limit, effect);
Node* branch =
graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
@ -895,8 +905,17 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
}
}
// Turn the stack check into a runtime call.
ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
// Turn the stack check into a runtime call. At function entry, the runtime
// function takes an offset argument which is subtracted from the stack
// pointer prior to the stack check (i.e. the check is `sp - offset >=
// limit`).
if (stack_check_kind == StackCheckKind::kJSFunctionEntry) {
node->InsertInput(zone(), 0,
graph()->NewNode(machine()->LoadStackCheckOffset()));
ReplaceWithRuntimeCall(node, Runtime::kStackGuardWithGap);
} else {
ReplaceWithRuntimeCall(node, Runtime::kStackGuard);
}
}
void JSGenericLowering::LowerJSDebugger(Node* node) {


@ -692,7 +692,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1) \
V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1) \
V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1) \
V(StackCheck, Operator::kNoWrite, 0, 0) \
V(Debugger, Operator::kNoProperties, 0, 0) \
V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1) \
V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \
@ -1343,6 +1342,15 @@ const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback,
parameters); // parameter
}
const Operator* JSOperatorBuilder::StackCheck(StackCheckKind kind) {
return new (zone()) Operator1<StackCheckKind>( // --
IrOpcode::kJSStackCheck, // opcode
Operator::kNoWrite, // properties
"JSStackCheck", // name
0, 1, 1, 0, 1, 2, // counts
kind); // parameter
}
const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
return new (zone()) Operator( // --
IrOpcode::kJSCreateEmptyLiteralObject, // opcode

View File

@ -6,8 +6,8 @@
#define V8_COMPILER_JS_OPERATOR_H_
#include "src/base/compiler-specific.h"
#include "src/common/globals.h"
#include "src/compiler/feedback-source.h"
#include "src/compiler/globals.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/type-hints.h"
#include "src/runtime/runtime.h"
@ -895,7 +895,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
const Operator* GeneratorRestoreRegister(int index);
const Operator* GeneratorRestoreInputOrDebugPos();
const Operator* StackCheck();
const Operator* StackCheck(StackCheckKind kind);
const Operator* Debugger();
const Operator* FulfillPromise();


@ -244,6 +244,7 @@ MachineType AtomicOpType(Operator const* op) {
V(Float64ExtractHighWord32, Operator::kNoProperties, 1, 0, 1) \
V(Float64InsertLowWord32, Operator::kNoProperties, 2, 0, 1) \
V(Float64InsertHighWord32, Operator::kNoProperties, 2, 0, 1) \
V(LoadStackCheckOffset, Operator::kNoProperties, 0, 0, 1) \
V(LoadFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(LoadParentFramePointer, Operator::kNoProperties, 0, 0, 1) \
V(Int32PairAdd, Operator::kNoProperties, 4, 0, 2) \
@ -898,12 +899,25 @@ struct MachineOperatorGlobalCache {
};
UnsafePointerAddOperator kUnsafePointerAdd;
struct StackPointerGreaterThanOperator final : public Operator {
StackPointerGreaterThanOperator()
: Operator(IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
"StackPointerGreaterThan", 1, 1, 0, 1, 1, 0) {}
struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
explicit StackPointerGreaterThanOperator(StackCheckKind kind)
: Operator1<StackCheckKind>(
IrOpcode::kStackPointerGreaterThan, Operator::kEliminatable,
"StackPointerGreaterThan", 1, 1, 0, 1, 1, 0, kind) {}
};
StackPointerGreaterThanOperator kStackPointerGreaterThan;
#define STACK_POINTER_GREATER_THAN(Kind) \
struct StackPointerGreaterThan##Kind##Operator final \
: public StackPointerGreaterThanOperator { \
StackPointerGreaterThan##Kind##Operator() \
: StackPointerGreaterThanOperator(StackCheckKind::k##Kind) {} \
}; \
StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind;
STACK_POINTER_GREATER_THAN(JSFunctionEntry)
STACK_POINTER_GREATER_THAN(JSIterationBody)
STACK_POINTER_GREATER_THAN(CodeStubAssembler)
STACK_POINTER_GREATER_THAN(Wasm)
#undef STACK_POINTER_GREATER_THAN
};
struct CommentOperator : public Operator1<const char*> {
@ -1070,8 +1084,19 @@ const Operator* MachineOperatorBuilder::UnsafePointerAdd() {
return &cache_.kUnsafePointerAdd;
}
const Operator* MachineOperatorBuilder::StackPointerGreaterThan() {
return &cache_.kStackPointerGreaterThan;
const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
StackCheckKind kind) {
switch (kind) {
case StackCheckKind::kJSFunctionEntry:
return &cache_.kStackPointerGreaterThanJSFunctionEntry;
case StackCheckKind::kJSIterationBody:
return &cache_.kStackPointerGreaterThanJSIterationBody;
case StackCheckKind::kCodeStubAssembler:
return &cache_.kStackPointerGreaterThanCodeStubAssembler;
case StackCheckKind::kWasm:
return &cache_.kStackPointerGreaterThanWasm;
}
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::BitcastWordToTagged() {
@ -1376,6 +1401,11 @@ const uint8_t* S8x16ShuffleOf(Operator const* op) {
return OpParameter<uint8_t*>(op);
}
StackCheckKind StackCheckKindOf(Operator const* op) {
DCHECK_EQ(IrOpcode::kStackPointerGreaterThan, op->opcode());
return OpParameter<StackCheckKind>(op);
}
#undef PURE_BINARY_OP_LIST_32
#undef PURE_BINARY_OP_LIST_64
#undef MACHINE_PURE_OP_LIST


@ -9,6 +9,7 @@
#include "src/base/enum-set.h"
#include "src/base/flags.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
#include "src/zone/zone.h"
@ -115,6 +116,8 @@ MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op)
V8_WARN_UNUSED_RESULT;
StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;
// Interface for building machine-level operators. These operators are
// machine-level but machine-independent and thus define a language suitable
// for generating code to run on architectures such as ia32, x64, arm, etc.
@ -687,8 +690,14 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
// Compares: stack_pointer > value.
const Operator* StackPointerGreaterThan();
// Compares: stack_pointer [- offset] > value. The offset is optionally
// applied for kFunctionEntry stack checks.
const Operator* StackPointerGreaterThan(StackCheckKind kind);
// Loads the offset that should be applied to the current stack
// pointer before a stack check. Used as input to the
// Runtime::kStackGuardWithGap call.
const Operator* LoadStackCheckOffset();
// Memory barrier.
const Operator* MemBarrier();


@ -729,6 +729,7 @@
V(TaggedPoisonOnSpeculation) \
V(Word32PoisonOnSpeculation) \
V(Word64PoisonOnSpeculation) \
V(LoadStackCheckOffset) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
V(UnalignedLoad) \


@ -579,7 +579,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
}
Node* StackPointerGreaterThan(Node* value) {
return AddNode(machine()->StackPointerGreaterThan(), value);
return AddNode(
machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler),
value);
}
#define INTPTR_BINOP(prefix, name) \


@ -1856,6 +1856,7 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kWord32PoisonOnSpeculation:
case IrOpcode::kWord64PoisonOnSpeculation:
case IrOpcode::kLoadStackCheckOffset:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
case IrOpcode::kUnalignedLoad:


@ -329,7 +329,8 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
*effect = limit;
Node* check = graph()->NewNode(
mcgraph()->machine()->StackPointerGreaterThan(), limit, *effect);
mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
limit, *effect);
*effect = check;
Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);


@ -698,6 +698,10 @@ void Deoptimizer::DoComputeOutputFrames() {
}
}
StackGuard* const stack_guard = isolate()->stack_guard();
CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
stack_guard->real_jslimit());
if (trace_scope_ != nullptr) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
@ -755,6 +759,7 @@ void Deoptimizer::DoComputeOutputFrames() {
// Translate each output frame.
int frame_index = 0; // output_frame_index
size_t total_output_frame_size = 0;
for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
@ -790,6 +795,7 @@ void Deoptimizer::DoComputeOutputFrames() {
FATAL("invalid frame");
break;
}
total_output_frame_size += output_[frame_index]->GetFrameSize();
}
FrameDescription* topmost = output_[count - 1];
@ -809,6 +815,15 @@ void Deoptimizer::DoComputeOutputFrames() {
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
caller_frame_top_, ms);
}
// The following invariant is fairly tricky to guarantee, since the size of
// an optimized frame and its deoptimized counterparts usually differs. We
// thus need to consider the case in which deoptimized frames are larger than
// the optimized frame in stack checks in optimized code. We do this by
// applying an offset to stack checks (see kArchStackPointerGreaterThan in the
// code generator).
CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
stack_guard->real_jslimit());
}
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,


@ -284,6 +284,21 @@ RUNTIME_FUNCTION(Runtime_StackGuard) {
return isolate->stack_guard()->HandleInterrupts();
}
RUNTIME_FUNCTION(Runtime_StackGuardWithGap) {
SealHandleScope shs(isolate);
DCHECK_EQ(args.length(), 1);
CONVERT_UINT32_ARG_CHECKED(gap, 0);
TRACE_EVENT0("v8.execute", "V8.StackGuard");
// First check if this is a real stack overflow.
StackLimitCheck check(isolate);
if (check.JsHasOverflowed(gap)) {
return isolate->StackOverflow();
}
return isolate->stack_guard()->HandleInterrupts();
}
RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());


@ -226,6 +226,7 @@ namespace internal {
F(RunMicrotaskCallback, 2, 1) \
F(PerformMicrotaskCheckpoint, 0, 1) \
F(StackGuard, 0, 1) \
F(StackGuardWithGap, 1, 1) \
F(Throw, 1, 1) \
F(ThrowApplyNonFunction, 1, 1) \
F(ThrowCalledNonCallable, 1, 1) \


@ -0,0 +1,18 @@
// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --stack-size=50
let i = 0;
function f() {
i++;
if (i > 10) {
%PrepareFunctionForOptimization(f);
%OptimizeFunctionOnNextCall(f);
}
new Promise(f);
return f.x;
}
f();


@ -245,7 +245,6 @@ const PureOperator kPureOperators[] = {
PURE(Float64Equal, 2, 0, 1), // --
PURE(Float64LessThan, 2, 0, 1), // --
PURE(Float64LessThanOrEqual, 2, 0, 1), // --
PURE(StackPointerGreaterThan, 1, 0, 1), // --
PURE(Float64ExtractLowWord32, 1, 0, 1), // --
PURE(Float64ExtractHighWord32, 1, 0, 1), // --
PURE(Float64InsertLowWord32, 2, 0, 1), // --