[compiler] Optionally apply an offset to stack checks
The motivation behind this change is that the frame size of an optimized function and its unoptimized version may differ, and deoptimization may thus trigger a stack overflow. The solution implemented in this CL is to optionally apply an offset to the stack check such that the check becomes 'sp - offset > limit'. The offset is applied to stack checks at function-entry, and is set to the difference between the optimized and unoptimized frame size.

A caveat: OSR may not be fully handled by this fix, since at an OSR entry we have already passed the function-entry stack check. A possible solution would be to *not* skip creation of function-entry stack checks for inlinees.

This CL:
1. Annotates stack check nodes with the stack check kind, where kind is one of {function-entry, iteration-body, unknown}.
2. Potentially allocates a temporary register to store the result of 'sp - offset' in instruction selection (and switches input registers to 'unique' mode).
3. Applies the offset in code generation.

Drive-by: Add src/compiler/globals.h for compiler-specific globals.

Bug: v8:9534,chromium:1000887
Change-Id: I257191c4a4978ccb60cfa5805ef421f30f0e9826
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1762521
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63701}
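For intuition, here is a minimal standalone sketch (not V8 code; the function and its parameter names are illustrative) of the adjusted check described above. The offset is the amount by which the deoptimized frames may exceed the optimized frame, clamped at zero, so the usual 'sp > limit' test only ever becomes stricter.

#include <algorithm>
#include <cstdint>

// Illustrative only: mirrors the shape of the check, not V8's implementation.
bool StackCheckWithOffset(uintptr_t sp, uintptr_t limit,
                          int32_t optimized_frame_height,
                          int32_t max_unoptimized_frame_height) {
  // How much larger the unoptimized (deoptimized) frames may be; never
  // negative, so the check only ever gets stricter.
  int32_t offset =
      std::max(max_unoptimized_frame_height - optimized_frame_height, 0);
  // 'sp > limit' becomes 'sp - offset > limit'.
  return sp - static_cast<uintptr_t>(offset) > limit;
}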
Parent: 3d2159462c
Commit: 4a16305b65
BUILD.gn
@@ -1794,6 +1794,7 @@ v8_compiler_sources = [
   "src/compiler/frame.cc",
   "src/compiler/frame.h",
   "src/compiler/functional-list.h",
+  "src/compiler/globals.h",
   "src/compiler/graph-assembler.cc",
   "src/compiler/graph-assembler.h",
   "src/compiler/graph-reducer.cc",
@@ -932,9 +932,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kArchStackPointerGreaterThan: {
+      // Potentially apply an offset to the current stack pointer before the
+      // comparison to consider the size difference of an optimized frame versus
+      // the contained unoptimized frames.
+
+      Register lhs_register = sp;
+      uint32_t offset;
+
+      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+        lhs_register = i.TempRegister(0);
+        __ sub(lhs_register, sp, Operand(offset));
+      }
+
       constexpr size_t kValueIndex = 0;
       DCHECK(instr->InputAt(kValueIndex)->IsRegister());
-      __ cmp(sp, i.InputRegister(kValueIndex));
+      __ cmp(lhs_register, i.InputRegister(kValueIndex));
       break;
     }
     case kArchTruncateDoubleToI:
@@ -888,11 +888,31 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
 
 void InstructionSelector::VisitStackPointerGreaterThan(
     Node* node, FlagsContinuation* cont) {
-  Node* const value = node->InputAt(0);
-  InstructionCode opcode = kArchStackPointerGreaterThan;
+  StackCheckKind kind = StackCheckKindOf(node->op());
+  InstructionCode opcode =
+      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
 
   ArmOperandGenerator g(this);
-  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+
+  // No outputs.
+  InstructionOperand* const outputs = nullptr;
+  const int output_count = 0;
+
+  // Applying an offset to this stack check requires a temp register. Offsets
+  // are only applied to the first stack check. If applying an offset, we must
+  // ensure the input and temp registers do not alias, thus kUniqueRegister.
+  InstructionOperand temps[] = {g.TempRegister()};
+  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
+  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+                                 ? OperandGenerator::kUniqueRegister
+                                 : OperandGenerator::kRegister;
+
+  Node* const value = node->InputAt(0);
+  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+  static constexpr int input_count = arraysize(inputs);
+
+  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+                       temp_count, temps, cont);
 }
 
 namespace {
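The kind travels from the instruction selector to the code generator inside the instruction word, as shown by the MiscField::encode call above. Below is a self-contained sketch of that round trip; the bit positions and the MiscFieldSketch helper are assumptions made for illustration, not V8's actual MiscField layout.

#include <cassert>
#include <cstdint>

enum class StackCheckKind : uint32_t { kJSFunctionEntry, kJSIterationBody };

// Hypothetical field layout; V8's real MiscField lives inside InstructionCode.
struct MiscFieldSketch {
  static constexpr int kShift = 22;
  static constexpr uint32_t kMask = 0x3u << kShift;
  static uint32_t encode(uint32_t value) { return value << kShift; }
  static uint32_t decode(uint32_t word) { return (word & kMask) >> kShift; }
};

int main() {
  constexpr uint32_t kArchStackPointerGreaterThanSketch = 0x2A;  // assumed value
  StackCheckKind kind = StackCheckKind::kJSFunctionEntry;

  // Instruction selection: pack the kind next to the arch opcode.
  uint32_t opcode = kArchStackPointerGreaterThanSketch |
                    MiscFieldSketch::encode(static_cast<uint32_t>(kind));

  // Code generation: recover the kind to decide whether to apply the offset.
  auto decoded = static_cast<StackCheckKind>(MiscFieldSketch::decode(opcode));
  assert(decoded == StackCheckKind::kJSFunctionEntry);
  return 0;
}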
@@ -839,9 +839,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kArchStackPointerGreaterThan: {
+      // Potentially apply an offset to the current stack pointer before the
+      // comparison to consider the size difference of an optimized frame versus
+      // the contained unoptimized frames.
+
+      Register lhs_register = sp;
+      uint32_t offset;
+
+      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+        lhs_register = i.TempRegister(0);
+        __ Sub(lhs_register, sp, offset);
+      }
+
       constexpr size_t kValueIndex = 0;
       DCHECK(instr->InputAt(kValueIndex)->IsRegister());
-      __ Cmp(sp, i.InputRegister(kValueIndex));
+      __ Cmp(lhs_register, i.InputRegister(kValueIndex));
       break;
     }
     case kArchTruncateDoubleToI:
@@ -1004,11 +1004,31 @@ void InstructionSelector::VisitWord64Shl(Node* node) {
 
 void InstructionSelector::VisitStackPointerGreaterThan(
     Node* node, FlagsContinuation* cont) {
-  Node* const value = node->InputAt(0);
-  InstructionCode opcode = kArchStackPointerGreaterThan;
+  StackCheckKind kind = StackCheckKindOf(node->op());
+  InstructionCode opcode =
+      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
 
   Arm64OperandGenerator g(this);
-  EmitWithContinuation(opcode, g.UseRegister(value), cont);
+
+  // No outputs.
+  InstructionOperand* const outputs = nullptr;
+  const int output_count = 0;
+
+  // Applying an offset to this stack check requires a temp register. Offsets
+  // are only applied to the first stack check. If applying an offset, we must
+  // ensure the input and temp registers do not alias, thus kUniqueRegister.
+  InstructionOperand temps[] = {g.TempRegister()};
+  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
+  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+                                 ? OperandGenerator::kUniqueRegister
+                                 : OperandGenerator::kRegister;
+
+  Node* const value = node->InputAt(0);
+  InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+  static constexpr int input_count = arraysize(inputs);
+
+  EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+                       temp_count, temps, cont);
 }
 
 namespace {
@@ -10,6 +10,7 @@
 #include "src/codegen/optimized-compilation-info.h"
 #include "src/codegen/string-constants.h"
 #include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/globals.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/wasm-compiler.h"
@@ -115,6 +116,28 @@ void CodeGenerator::CreateFrameAccessState(Frame* frame) {
   frame_access_state_ = new (zone()) FrameAccessState(frame);
 }
 
+bool CodeGenerator::ShouldApplyOffsetToStackCheck(Instruction* instr,
+                                                  uint32_t* offset) {
+  DCHECK_EQ(ArchOpcodeField::decode(instr->opcode()),
+            kArchStackPointerGreaterThan);
+
+  StackCheckKind kind =
+      static_cast<StackCheckKind>(MiscField::decode(instr->opcode()));
+  if (kind != StackCheckKind::kJSFunctionEntry) return false;
+
+  int32_t optimized_frame_height =
+      frame()->GetTotalFrameSlotCount() * kSystemPointerSize;
+  DCHECK(is_int32(max_unoptimized_frame_height_));
+  int32_t signed_max_unoptimized_frame_height =
+      static_cast<int32_t>(max_unoptimized_frame_height_);
+
+  int32_t signed_offset =
+      std::max(signed_max_unoptimized_frame_height - optimized_frame_height, 0);
+  *offset = static_cast<uint32_t>(signed_offset);
+
+  return (signed_offset > 0 && frame_access_state()->has_frame());
+}
+
 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
     DeoptimizationExit* exit) {
   int deoptimization_id = exit->deoptimization_id();
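To make the computation above concrete, here is a hedged, worked example with assumed frame heights (all numbers are illustrative, not taken from the CL):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed sizes, in bytes: 5 slots * 8 bytes for the optimized frame, and
  // a larger combined height for the unoptimized frames it may deopt into.
  int32_t optimized_frame_height = 5 * 8;       // 40, assumed
  int32_t max_unoptimized_frame_height = 104;   // assumed
  int32_t offset =
      std::max(max_unoptimized_frame_height - optimized_frame_height, 0);
  // Prints 64: the function-entry check compares 'sp - 64' against the limit.
  std::printf("offset = %d\n", offset);
  return 0;
}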
@@ -169,6 +169,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
 
   static constexpr int kBinarySearchSwitchMinimalCases = 4;
 
+  // Returns true if an offset should be applied to the given stack check. This
+  // is the case for stack checks on function-entry when the offset is non-zero,
+  // where the offset is the difference between the size of optimized and
+  // corresponding deoptimized frames.
+  bool ShouldApplyOffsetToStackCheck(Instruction* instr, uint32_t* offset);
+
  private:
   GapResolver* resolver() { return &resolver_; }
   SafepointTableBuilder* safepoints() { return &safepoints_; }
@@ -940,11 +940,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kArchStackPointerGreaterThan: {
+      // Potentially apply an offset to the current stack pointer before the
+      // comparison to consider the size difference of an optimized frame versus
+      // the contained unoptimized frames.
+      Register lhs_register = esp;
+      uint32_t offset;
+
+      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+        lhs_register = i.TempRegister(0);
+        __ lea(lhs_register, Operand(esp, -1 * static_cast<int32_t>(offset)));
+      }
+
       constexpr size_t kValueIndex = 0;
       if (HasAddressingMode(instr)) {
-        __ cmp(esp, i.MemoryOperand(kValueIndex));
+        __ cmp(lhs_register, i.MemoryOperand(kValueIndex));
       } else {
-        __ cmp(esp, i.InputRegister(kValueIndex));
+        __ cmp(lhs_register, i.InputRegister(kValueIndex));
       }
       break;
     }
@@ -86,11 +86,10 @@ class IA32OperandGenerator final : public OperandGenerator {
     }
   }
 
-  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
-                                             Node* displacement_node,
-                                             DisplacementMode displacement_mode,
-                                             InstructionOperand inputs[],
-                                             size_t* input_count) {
+  AddressingMode GenerateMemoryOperandInputs(
+      Node* index, int scale, Node* base, Node* displacement_node,
+      DisplacementMode displacement_mode, InstructionOperand inputs[],
+      size_t* input_count, RegisterMode register_mode = kRegister) {
     AddressingMode mode = kMode_MRI;
     int32_t displacement = (displacement_node == nullptr)
                                ? 0
@@ -105,10 +104,10 @@ class IA32OperandGenerator final : public OperandGenerator {
       }
     }
     if (base != nullptr) {
-      inputs[(*input_count)++] = UseRegister(base);
+      inputs[(*input_count)++] = UseRegisterWithMode(base, register_mode);
       if (index != nullptr) {
         DCHECK(scale >= 0 && scale <= 3);
-        inputs[(*input_count)++] = UseRegister(index);
+        inputs[(*input_count)++] = UseRegisterWithMode(index, register_mode);
         if (displacement != 0) {
           inputs[(*input_count)++] = TempImmediate(displacement);
           static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
@@ -130,7 +129,7 @@ class IA32OperandGenerator final : public OperandGenerator {
     } else {
       DCHECK(scale >= 0 && scale <= 3);
       if (index != nullptr) {
-        inputs[(*input_count)++] = UseRegister(index);
+        inputs[(*input_count)++] = UseRegisterWithMode(index, register_mode);
         if (displacement != 0) {
           inputs[(*input_count)++] = TempImmediate(displacement);
           static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
@@ -149,9 +148,9 @@ class IA32OperandGenerator final : public OperandGenerator {
     return mode;
   }
 
-  AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
-                                                  InstructionOperand inputs[],
-                                                  size_t* input_count) {
+  AddressingMode GetEffectiveAddressMemoryOperand(
+      Node* node, InstructionOperand inputs[], size_t* input_count,
+      RegisterMode register_mode = kRegister) {
     {
       LoadMatcher<ExternalReferenceMatcher> m(node);
       if (m.index().HasValue() && m.object().HasValue() &&
@@ -172,10 +171,12 @@ class IA32OperandGenerator final : public OperandGenerator {
       if ((m.displacement() == nullptr || CanBeImmediate(m.displacement()))) {
         return GenerateMemoryOperandInputs(
             m.index(), m.scale(), m.base(), m.displacement(),
-            m.displacement_mode(), inputs, input_count);
+            m.displacement_mode(), inputs, input_count, register_mode);
       } else {
-        inputs[(*input_count)++] = UseRegister(node->InputAt(0));
-        inputs[(*input_count)++] = UseRegister(node->InputAt(1));
+        inputs[(*input_count)++] =
+            UseRegisterWithMode(node->InputAt(0), register_mode);
+        inputs[(*input_count)++] =
+            UseRegisterWithMode(node->InputAt(1), register_mode);
         return kMode_MR1;
       }
     }
@@ -559,14 +560,30 @@ void InstructionSelector::VisitWord32Xor(Node* node) {
 
 void InstructionSelector::VisitStackPointerGreaterThan(
     Node* node, FlagsContinuation* cont) {
-  Node* const value = node->InputAt(0);
-  InstructionCode opcode = kArchStackPointerGreaterThan;
+  StackCheckKind kind = StackCheckKindOf(node->op());
+  InstructionCode opcode =
+      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
 
   DCHECK(cont->IsBranch());
   const int effect_level =
       GetEffectLevel(cont->true_block()->PredecessorAt(0)->control_input());
 
   IA32OperandGenerator g(this);
+
+  // No outputs.
+  InstructionOperand* const outputs = nullptr;
+  const int output_count = 0;
+
+  // Applying an offset to this stack check requires a temp register. Offsets
+  // are only applied to the first stack check. If applying an offset, we must
+  // ensure the input and temp registers do not alias, thus kUniqueRegister.
+  InstructionOperand temps[] = {g.TempRegister()};
+  const int temp_count = (kind == StackCheckKind::kJSFunctionEntry) ? 1 : 0;
+  const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+                                 ? OperandGenerator::kUniqueRegister
+                                 : OperandGenerator::kRegister;
+
+  Node* const value = node->InputAt(0);
   if (g.CanBeMemoryOperand(kIA32Cmp, node, value, effect_level)) {
     DCHECK_EQ(IrOpcode::kLoad, value->opcode());
 
@@ -575,14 +592,18 @@ void InstructionSelector::VisitStackPointerGreaterThan(
 
     size_t input_count = 0;
     InstructionOperand inputs[kMaxInputCount];
-    AddressingMode addressing_mode =
-        g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
+    AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
+        value, inputs, &input_count, register_mode);
     opcode |= AddressingModeField::encode(addressing_mode);
     DCHECK_LE(input_count, kMaxInputCount);
 
-    EmitWithContinuation(opcode, 0, nullptr, input_count, inputs, cont);
+    EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+                         temp_count, temps, cont);
   } else {
-    EmitWithContinuation(opcode, g.UseRegister(value), cont);
+    InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+    static constexpr int input_count = arraysize(inputs);
+    EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+                         temp_count, temps, cont);
   }
 }
 
@@ -241,6 +241,19 @@ class OperandGenerator {
                                UnallocatedOperand::USED_AT_START, vreg);
   }
 
+  // The kind of register generated for memory operands. kRegister is alive
+  // until the start of the operation, kUniqueRegister until the end.
+  enum RegisterMode {
+    kRegister,
+    kUniqueRegister,
+  };
+
+  InstructionOperand UseRegisterWithMode(Node* node,
+                                         RegisterMode register_mode) {
+    return register_mode == kRegister ? UseRegister(node)
+                                      : UseUniqueRegister(node);
+  }
+
   InstructionOperand TempDoubleRegister() {
     UnallocatedOperand op = UnallocatedOperand(
         UnallocatedOperand::MUST_HAVE_REGISTER,
@@ -42,6 +42,7 @@ InstructionSelector::InstructionSelector(
       instructions_(zone),
       continuation_inputs_(sequence->zone()),
       continuation_outputs_(sequence->zone()),
+      continuation_temps_(sequence->zone()),
       defined_(node_count, false, zone),
       used_(node_count, false, zone),
       effect_level_(node_count, 0, zone),
@@ -723,6 +724,14 @@ Instruction* InstructionSelector::EmitWithContinuation(
 Instruction* InstructionSelector::EmitWithContinuation(
     InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
     size_t input_count, InstructionOperand* inputs, FlagsContinuation* cont) {
+  return EmitWithContinuation(opcode, output_count, outputs, input_count,
+                              inputs, 0, nullptr, cont);
+}
+
+Instruction* InstructionSelector::EmitWithContinuation(
+    InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+    size_t input_count, InstructionOperand* inputs, size_t temp_count,
+    InstructionOperand* temps, FlagsContinuation* cont) {
   OperandGenerator g(this);
 
   opcode = cont->Encode(opcode);
@@ -737,6 +746,11 @@ Instruction* InstructionSelector::EmitWithContinuation(
     continuation_outputs_.push_back(outputs[i]);
   }
 
+  continuation_temps_.resize(0);
+  for (size_t i = 0; i < temp_count; i++) {
+    continuation_temps_.push_back(temps[i]);
+  }
+
   if (cont->IsBranch()) {
     continuation_inputs_.push_back(g.Label(cont->true_block()));
     continuation_inputs_.push_back(g.Label(cont->false_block()));
@@ -760,8 +774,10 @@ Instruction* InstructionSelector::EmitWithContinuation(
   size_t const emit_outputs_size = continuation_outputs_.size();
   auto* emit_outputs =
       emit_outputs_size ? &continuation_outputs_.front() : nullptr;
+  size_t const emit_temps_size = continuation_temps_.size();
+  auto* emit_temps = emit_temps_size ? &continuation_temps_.front() : nullptr;
   return Emit(opcode, emit_outputs_size, emit_outputs, emit_inputs_size,
-              emit_inputs, 0, nullptr);
+              emit_inputs, emit_temps_size, emit_temps);
 }
 
 void InstructionSelector::AppendDeoptimizeArguments(
@@ -346,6 +346,10 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
                                     size_t input_count,
                                     InstructionOperand* inputs,
                                     FlagsContinuation* cont);
+  Instruction* EmitWithContinuation(
+      InstructionCode opcode, size_t output_count, InstructionOperand* outputs,
+      size_t input_count, InstructionOperand* inputs, size_t temp_count,
+      InstructionOperand* temps, FlagsContinuation* cont);
 
   // ===========================================================================
   // ===== Architecture-independent deoptimization exit emission methods. ======
@@ -769,6 +773,7 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
   ZoneVector<Instruction*> instructions_;
   InstructionOperandVector continuation_inputs_;
   InstructionOperandVector continuation_outputs_;
+  InstructionOperandVector continuation_temps_;
   BoolVector defined_;
   BoolVector used_;
   IntVector effect_level_;
@@ -1023,11 +1023,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       }
       break;
     case kArchStackPointerGreaterThan: {
+      // Potentially apply an offset to the current stack pointer before the
+      // comparison to consider the size difference of an optimized frame versus
+      // the contained unoptimized frames.
+
+      Register lhs_register = rsp;
+      uint32_t offset;
+
+      if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+        lhs_register = kScratchRegister;
+        __ leaq(lhs_register, Operand(rsp, static_cast<int32_t>(offset) * -1));
+      }
+
       constexpr size_t kValueIndex = 0;
       if (HasAddressingMode(instr)) {
-        __ cmpq(rsp, i.MemoryOperand(kValueIndex));
+        __ cmpq(lhs_register, i.MemoryOperand(kValueIndex));
       } else {
-        __ cmpq(rsp, i.InputRegister(kValueIndex));
+        __ cmpq(lhs_register, i.InputRegister(kValueIndex));
       }
       break;
     }
@@ -532,14 +532,16 @@ void InstructionSelector::VisitWord64Xor(Node* node) {
 
 void InstructionSelector::VisitStackPointerGreaterThan(
     Node* node, FlagsContinuation* cont) {
-  Node* const value = node->InputAt(0);
-  InstructionCode opcode = kArchStackPointerGreaterThan;
+  StackCheckKind kind = StackCheckKindOf(node->op());
+  InstructionCode opcode =
+      kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
 
   DCHECK(cont->IsBranch());
   const int effect_level =
       GetEffectLevel(cont->true_block()->PredecessorAt(0)->control_input());
 
   X64OperandGenerator g(this);
+  Node* const value = node->InputAt(0);
   if (g.CanBeMemoryOperand(kX64Cmp, node, value, effect_level)) {
     DCHECK_EQ(IrOpcode::kLoad, value->opcode());
 
@@ -347,8 +347,9 @@ class BytecodeGraphBuilder {
   void set_currently_peeled_loop_offset(int offset) {
     currently_peeled_loop_offset_ = offset;
   }
-  bool skip_next_stack_check() const { return skip_next_stack_check_; }
-  void unset_skip_next_stack_check() { skip_next_stack_check_ = false; }
+  bool skip_first_stack_check() const { return skip_first_stack_check_; }
+  bool visited_first_stack_check() const { return visited_first_stack_check_; }
+  void set_visited_first_stack_check() { visited_first_stack_check_ = true; }
   int current_exception_handler() const { return current_exception_handler_; }
   void set_current_exception_handler(int index) {
     current_exception_handler_ = index;
@@ -381,7 +382,9 @@ class BytecodeGraphBuilder {
   Environment* environment_;
   bool const osr_;
   int currently_peeled_loop_offset_;
-  bool skip_next_stack_check_;
+
+  const bool skip_first_stack_check_;
+  bool visited_first_stack_check_ = false;
 
   // Merge environments are snapshots of the environment at points where the
   // control flow merges. This models a forward data flow propagation of all
@@ -957,8 +960,8 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
       environment_(nullptr),
       osr_(!osr_offset.IsNone()),
       currently_peeled_loop_offset_(-1),
-      skip_next_stack_check_(flags &
-                             BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
+      skip_first_stack_check_(flags &
+                              BytecodeGraphBuilderFlag::kSkipFirstStackCheck),
      merge_environments_(local_zone),
      generator_merge_environments_(local_zone),
      exception_handlers_(local_zone),
|
||||
|
||||
if (environment() != nullptr) {
|
||||
BuildLoopHeaderEnvironment(current_offset);
|
||||
if (skip_next_stack_check() && bytecode_iterator().current_bytecode() ==
|
||||
interpreter::Bytecode::kStackCheck) {
|
||||
unset_skip_next_stack_check();
|
||||
return;
|
||||
}
|
||||
|
||||
switch (bytecode_iterator().current_bytecode()) {
|
||||
#define BYTECODE_CASE(name, ...) \
|
||||
case interpreter::Bytecode::k##name: \
|
||||
@@ -3177,8 +3174,20 @@ void BytecodeGraphBuilder::VisitSwitchOnSmiNoFeedback() {
 }
 
 void BytecodeGraphBuilder::VisitStackCheck() {
+  // Note: The stack check kind is determined heuristically: we simply assume
+  // that the first seen stack check is at function-entry, and all other stack
+  // checks are at iteration-body. An alternative precise solution would be to
+  // parameterize the StackCheck bytecode; but this has the caveat of increased
+  // code size.
+  StackCheckKind kind = StackCheckKind::kJSIterationBody;
+  if (!visited_first_stack_check()) {
+    set_visited_first_stack_check();
+    kind = StackCheckKind::kJSFunctionEntry;
+    if (skip_first_stack_check()) return;
+  }
+
   PrepareEagerCheckpoint();
-  Node* node = NewNode(javascript()->StackCheck());
+  Node* node = NewNode(javascript()->StackCheck(kind));
   environment()->RecordAfterState(node, Environment::kAttachFrameState);
 }
 
src/compiler/globals.h (new file, 43 lines)
@@ -0,0 +1,43 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GLOBALS_H_
+#define V8_COMPILER_GLOBALS_H_
+
+#include "src/common/globals.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum class StackCheckKind {
+  kJSFunctionEntry = 0,
+  kJSIterationBody,
+  kCodeStubAssembler,
+  kWasm,
+};
+
+inline std::ostream& operator<<(std::ostream& os, StackCheckKind kind) {
+  switch (kind) {
+    case StackCheckKind::kJSFunctionEntry:
+      return os << "JSFunctionEntry";
+    case StackCheckKind::kJSIterationBody:
+      return os << "JSIterationBody";
+    case StackCheckKind::kCodeStubAssembler:
+      return os << "CodeStubAssembler";
+    case StackCheckKind::kWasm:
+      return os << "Wasm";
+  }
+  UNREACHABLE();
+}
+
+inline size_t hash_value(StackCheckKind kind) {
+  return static_cast<size_t>(kind);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GLOBALS_H_
@@ -840,6 +840,15 @@ void JSGenericLowering::LowerJSGeneratorRestoreRegister(Node* node) {
   UNREACHABLE();  // Eliminated in typed lowering.
 }
 
+namespace {
+
+StackCheckKind StackCheckKindOfJSStackCheck(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kJSStackCheck);
+  return OpParameter<StackCheckKind>(op);
+}
+
+}  // namespace
+
 void JSGenericLowering::LowerJSStackCheck(Node* node) {
   Node* effect = NodeProperties::GetEffectInput(node);
   Node* control = NodeProperties::GetControlInput(node);
@@ -850,7 +859,9 @@ void JSGenericLowering::LowerJSStackCheck(Node* node) {
           ExternalReference::address_of_jslimit(isolate())),
       jsgraph()->IntPtrConstant(0), effect, control);
 
-  Node* check = graph()->NewNode(machine()->StackPointerGreaterThan(), limit);
+  StackCheckKind stack_check_kind = StackCheckKindOfJSStackCheck(node->op());
+  Node* check = graph()->NewNode(
+      machine()->StackPointerGreaterThan(stack_check_kind), limit);
   Node* branch =
       graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
 
@@ -663,7 +663,6 @@ CompareOperationHint CompareOperationHintOf(const Operator* op) {
   V(GeneratorRestoreContinuation, Operator::kNoThrow, 1, 1)         \
   V(GeneratorRestoreContext, Operator::kNoThrow, 1, 1)              \
   V(GeneratorRestoreInputOrDebugPos, Operator::kNoThrow, 1, 1)      \
-  V(StackCheck, Operator::kNoWrite, 0, 0)                           \
   V(Debugger, Operator::kNoProperties, 0, 0)                        \
   V(FulfillPromise, Operator::kNoDeopt | Operator::kNoThrow, 2, 1)  \
   V(PerformPromiseThen, Operator::kNoDeopt | Operator::kNoThrow, 4, 1) \
@@ -1293,6 +1292,15 @@ const Operator* JSOperatorBuilder::CloneObject(FeedbackSource const& feedback,
       parameters);                             // parameter
 }
 
+const Operator* JSOperatorBuilder::StackCheck(StackCheckKind kind) {
+  return new (zone()) Operator1<StackCheckKind>(  // --
+      IrOpcode::kJSStackCheck,                    // opcode
+      Operator::kNoWrite,                         // properties
+      "JSStackCheck",                             // name
+      0, 1, 1, 0, 1, 2,                           // counts
+      kind);                                      // parameter
+}
+
 const Operator* JSOperatorBuilder::CreateEmptyLiteralObject() {
   return new (zone()) Operator(                   // --
       IrOpcode::kJSCreateEmptyLiteralObject,      // opcode
@@ -6,8 +6,8 @@
 #define V8_COMPILER_JS_OPERATOR_H_
 
 #include "src/base/compiler-specific.h"
-#include "src/common/globals.h"
 #include "src/compiler/feedback-source.h"
+#include "src/compiler/globals.h"
 #include "src/handles/maybe-handles.h"
 #include "src/objects/type-hints.h"
 #include "src/runtime/runtime.h"
@@ -862,7 +862,7 @@ class V8_EXPORT_PRIVATE JSOperatorBuilder final
   const Operator* GeneratorRestoreRegister(int index);
   const Operator* GeneratorRestoreInputOrDebugPos();
 
-  const Operator* StackCheck();
+  const Operator* StackCheck(StackCheckKind kind);
   const Operator* Debugger();
 
   const Operator* FulfillPromise();
@@ -396,8 +396,7 @@ MachineType AtomicOpType(Operator const* op) {
   V(S1x8AnyTrue, Operator::kNoProperties, 1, 0, 1)   \
   V(S1x8AllTrue, Operator::kNoProperties, 1, 0, 1)   \
   V(S1x16AnyTrue, Operator::kNoProperties, 1, 0, 1)  \
-  V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)  \
-  V(StackPointerGreaterThan, Operator::kNoProperties, 1, 0, 1)
+  V(S1x16AllTrue, Operator::kNoProperties, 1, 0, 1)
 
 // The format is:
 // V(Name, properties, value_input_count, control_input_count, output_count)
@@ -515,6 +514,13 @@ struct StackSlotOperator : public Operator1<StackSlotRepresentation> {
                         StackSlotRepresentation(size, alignment)) {}
 };
 
+struct StackPointerGreaterThanOperator : public Operator1<StackCheckKind> {
+  explicit StackPointerGreaterThanOperator(StackCheckKind kind)
+      : Operator1<StackCheckKind>(IrOpcode::kStackPointerGreaterThan,
+                                  Operator::kPure, "StackPointerGreaterThan", 1,
+                                  0, 0, 1, 0, 0, kind) {}
+};
+
 struct MachineOperatorGlobalCache {
 #define PURE(Name, properties, value_input_count, control_input_count, \
              output_count)                                              \
@@ -890,6 +896,20 @@ struct MachineOperatorGlobalCache {
                            "UnsafePointerAdd", 2, 1, 1, 1, 1, 0) {}
   };
   UnsafePointerAddOperator kUnsafePointerAdd;
+
+#define STACK_POINTER_GREATER_THAN(Kind)                              \
+  struct StackPointerGreaterThan##Kind##Operator final                \
+      : public StackPointerGreaterThanOperator {                      \
+    StackPointerGreaterThan##Kind##Operator()                         \
+        : StackPointerGreaterThanOperator(StackCheckKind::k##Kind) {} \
+  };                                                                  \
+  StackPointerGreaterThan##Kind##Operator kStackPointerGreaterThan##Kind;
+
+  STACK_POINTER_GREATER_THAN(JSFunctionEntry)
+  STACK_POINTER_GREATER_THAN(JSIterationBody)
+  STACK_POINTER_GREATER_THAN(CodeStubAssembler)
+  STACK_POINTER_GREATER_THAN(Wasm)
+#undef STACK_POINTER_GREATER_THAN
 };
 
 struct CommentOperator : public Operator1<const char*> {
@@ -1080,6 +1100,21 @@ const Operator* MachineOperatorBuilder::Comment(const char* msg) {
   return new (zone_) CommentOperator(msg);
 }
 
+const Operator* MachineOperatorBuilder::StackPointerGreaterThan(
+    StackCheckKind kind) {
+  switch (kind) {
+    case StackCheckKind::kJSFunctionEntry:
+      return &cache_.kStackPointerGreaterThanJSFunctionEntry;
+    case StackCheckKind::kJSIterationBody:
+      return &cache_.kStackPointerGreaterThanJSIterationBody;
+    case StackCheckKind::kCodeStubAssembler:
+      return &cache_.kStackPointerGreaterThanCodeStubAssembler;
+    case StackCheckKind::kWasm:
+      return &cache_.kStackPointerGreaterThanWasm;
+  }
+  UNREACHABLE();
+}
+
 const Operator* MachineOperatorBuilder::MemBarrier() {
   return &cache_.kMemoryBarrier;
 }
@@ -1350,6 +1385,11 @@ const uint8_t* S8x16ShuffleOf(Operator const* op) {
   return OpParameter<uint8_t*>(op);
 }
 
+StackCheckKind StackCheckKindOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kStackPointerGreaterThan, op->opcode());
+  return OpParameter<StackCheckKind>(op);
+}
+
 #undef PURE_BINARY_OP_LIST_32
 #undef PURE_BINARY_OP_LIST_64
 #undef MACHINE_PURE_OP_LIST
@@ -9,6 +9,7 @@
 #include "src/base/enum-set.h"
 #include "src/base/flags.h"
 #include "src/codegen/machine-type.h"
+#include "src/compiler/globals.h"
 #include "src/compiler/write-barrier-kind.h"
 #include "src/zone/zone.h"
 
@@ -115,6 +116,8 @@ MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
 V8_EXPORT_PRIVATE const uint8_t* S8x16ShuffleOf(Operator const* op)
     V8_WARN_UNUSED_RESULT;
 
+StackCheckKind StackCheckKindOf(Operator const* op) V8_WARN_UNUSED_RESULT;
+
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -677,8 +680,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
   const Operator* LoadFramePointer();
   const Operator* LoadParentFramePointer();
 
-  // Compares: stack_pointer > value.
-  const Operator* StackPointerGreaterThan();
+  // Compares: stack_pointer [- offset] > value. The offset is optionally
+  // applied for kFunctionEntry stack checks.
+  const Operator* StackPointerGreaterThan(StackCheckKind kind);
 
   // Memory barrier.
   const Operator* MemBarrier();
@@ -579,7 +579,9 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
     return AddNode(machine()->Word32PairSar(), low_word, high_word, shift);
   }
   Node* StackPointerGreaterThan(Node* value) {
-    return AddNode(machine()->StackPointerGreaterThan(), value);
+    return AddNode(
+        machine()->StackPointerGreaterThan(StackCheckKind::kCodeStubAssembler),
+        value);
   }
 
 #define INTPTR_BINOP(prefix, name) \
@@ -335,8 +335,9 @@ void WasmGraphBuilder::StackCheck(wasm::WasmCodePosition position,
       mcgraph()->IntPtrConstant(0), limit_address, *control);
   *effect = limit;
 
-  Node* check =
-      graph()->NewNode(mcgraph()->machine()->StackPointerGreaterThan(), limit);
+  Node* check = graph()->NewNode(
+      mcgraph()->machine()->StackPointerGreaterThan(StackCheckKind::kWasm),
+      limit);
 
   Diamond stack_check(graph(), mcgraph()->common(), check, BranchHint::kTrue);
   stack_check.Chain(*control);
@@ -687,6 +687,10 @@ void Deoptimizer::DoComputeOutputFrames() {
     }
   }
 
+  StackGuard* const stack_guard = isolate()->stack_guard();
+  CHECK_GT(static_cast<uintptr_t>(caller_frame_top_),
+           stack_guard->real_jslimit());
+
   if (trace_scope_ != nullptr) {
     timer.Start();
     PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
@@ -744,6 +748,7 @@ void Deoptimizer::DoComputeOutputFrames() {
 
   // Translate each output frame.
   int frame_index = 0;  // output_frame_index
+  size_t total_output_frame_size = 0;
   for (size_t i = 0; i < count; ++i, ++frame_index) {
     // Read the ast node id, function, and frame height for this output frame.
     TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
@@ -779,6 +784,7 @@ void Deoptimizer::DoComputeOutputFrames() {
         FATAL("invalid frame");
         break;
     }
+    total_output_frame_size += output_[frame_index]->GetFrameSize();
   }
 
   FrameDescription* topmost = output_[count - 1];
@@ -798,6 +804,15 @@ void Deoptimizer::DoComputeOutputFrames() {
            bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
            caller_frame_top_, ms);
   }
+
+  // The following invariant is fairly tricky to guarantee, since the size of
+  // an optimized frame and its deoptimized counterparts usually differs. We
+  // thus need to consider the case in which deoptimized frames are larger than
+  // the optimized frame in stack checks in optimized code. We do this by
+  // applying an offset to stack checks (see kArchStackPointerGreaterThan in the
+  // code generator).
+  CHECK_GT(static_cast<uintptr_t>(caller_frame_top_) - total_output_frame_size,
+           stack_guard->real_jslimit());
 }
 
 void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
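The CHECK_GT added above is the property the stack-check offset is meant to guarantee. A small self-contained sketch of the same invariant follows; the function name and the abort() behavior are assumptions for illustration, not V8's code.

#include <cstdint>
#include <cstdlib>

// Illustrative only: the deoptimizer's materialized output frames, laid out
// below caller_frame_top, must stay above the real stack limit.
void CheckOutputFramesFit(uintptr_t caller_frame_top,
                          size_t total_output_frame_size,
                          uintptr_t real_jslimit) {
  if (caller_frame_top - total_output_frame_size <= real_jslimit) {
    // In V8 this would be a fatal CHECK failure rather than abort().
    std::abort();
  }
}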
@@ -245,7 +245,6 @@ const PureOperator kPureOperators[] = {
     PURE(Float64Equal, 2, 0, 1),              // --
     PURE(Float64LessThan, 2, 0, 1),           // --
     PURE(Float64LessThanOrEqual, 2, 0, 1),    // --
-    PURE(StackPointerGreaterThan, 1, 0, 1),   // --
     PURE(Float64ExtractLowWord32, 1, 0, 1),   // --
     PURE(Float64ExtractHighWord32, 1, 0, 1),  // --
     PURE(Float64InsertLowWord32, 2, 0, 1),    // --