[maglev] Don't spill constants but load them on-demand

This avoids unnecessary spill moves and reduces register pressure.

Bug: v8:7700
Change-Id: I3f2c35f2b6c0a3e64408b40d59696d924af8a9b4
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3647365
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/main@{#80527}
This commit is contained in:
Toon Verwaest 2022-05-13 14:22:35 +02:00 committed by V8 LUCI CQ
parent 59518b083a
commit 30efa3150b
6 changed files with 273 additions and 111 deletions

View File

@@ -8,6 +8,7 @@
#include "src/codegen/code-desc.h"
#include "src/codegen/register.h"
#include "src/codegen/safepoint-table.h"
#include "src/codegen/x64/register-x64.h"
#include "src/deoptimizer/translation-array.h"
#include "src/execution/frame-constants.h"
#include "src/interpreter/bytecode-register.h"
@@ -41,8 +42,7 @@ std::array<T, N> repeat(T value) {
}
using RegisterMoves = std::array<Register, Register::kNumRegisters>;
using StackToRegisterMoves =
std::array<compiler::InstructionOperand, Register::kNumRegisters>;
using RegisterReloads = std::array<ValueNode*, Register::kNumRegisters>;
class MaglevCodeGeneratingNodeProcessor {
public:
@@ -189,22 +189,15 @@ class MaglevCodeGeneratingNodeProcessor {
RecursivelyEmitParallelMoveChain(source, source, target, moves);
}
void EmitStackToRegisterGapMove(compiler::InstructionOperand source,
Register target) {
if (!source.IsAllocated()) return;
__ movq(target, code_gen_state_->GetStackSlot(
compiler::AllocatedOperand::cast(source)));
void EmitRegisterReload(ValueNode* node, Register target) {
if (node == nullptr) return;
node->LoadToRegister(code_gen_state_, target);
}
void RecordGapMove(compiler::AllocatedOperand source, Register target_reg,
RegisterMoves& register_moves,
StackToRegisterMoves& stack_to_register_moves) {
if (source.IsStackSlot()) {
// For stack->reg moves, don't emit the move yet, but instead record the
// move in the set of stack-to-register moves, to be executed after the
// reg->reg parallel moves.
stack_to_register_moves[target_reg.code()] = source;
} else {
void RecordGapMove(ValueNode* node, compiler::InstructionOperand source,
Register target_reg, RegisterMoves& register_moves,
RegisterReloads& register_reloads) {
if (source.IsAnyRegister()) {
// For reg->reg moves, don't emit the move yet, but instead record the
// move in the set of parallel register moves, to be resolved later.
Register source_reg = ToRegister(source);
@@ -212,26 +205,31 @@ class MaglevCodeGeneratingNodeProcessor {
DCHECK(!register_moves[source_reg.code()].is_valid());
register_moves[source_reg.code()] = target_reg;
}
} else {
// For register loads from memory, don't emit the move yet, but instead
// record the move in the set of register reloads, to be executed after
// the reg->reg parallel moves.
register_reloads[target_reg.code()] = node;
}
}
void RecordGapMove(compiler::AllocatedOperand source,
void RecordGapMove(ValueNode* node, compiler::InstructionOperand source,
compiler::AllocatedOperand target,
RegisterMoves& register_moves,
StackToRegisterMoves& stack_to_register_moves) {
RegisterReloads& stack_to_register_moves) {
if (target.IsRegister()) {
RecordGapMove(source, ToRegister(target), register_moves,
RecordGapMove(node, source, ToRegister(target), register_moves,
stack_to_register_moves);
return;
}
// stack->stack and reg->stack moves should be executed before registers are
// clobbered by reg->reg or stack->reg, so emit them immediately.
// memory->stack and reg->stack moves should be executed before registers
// are clobbered by reg->reg or memory->reg, so emit them immediately.
if (source.IsRegister()) {
Register source_reg = ToRegister(source);
__ movq(code_gen_state_->GetStackSlot(target), source_reg);
} else {
__ movq(kScratchRegister, code_gen_state_->GetStackSlot(source));
EmitRegisterReload(node, kScratchRegister);
__ movq(code_gen_state_->GetStackSlot(target), kScratchRegister);
}
}
@@ -253,34 +251,38 @@ class MaglevCodeGeneratingNodeProcessor {
RegisterMoves register_moves =
repeat<Register::kNumRegisters>(Register::no_reg());
// Save stack to register moves in an array, so that we can execute them
// after the parallel moves have read the register values. Note that the
// mapping is:
// Save registers restored from a memory location in an array, so that we
// can execute them after the parallel moves have read the register values.
// Note that the mapping is:
//
// stack_to_register_moves[target] = source.
StackToRegisterMoves stack_to_register_moves;
// register_reloads[target] = node.
ValueNode* n = nullptr;
RegisterReloads register_reloads = repeat<Register::kNumRegisters>(n);
__ RecordComment("-- Gap moves:");
target->state()->register_state().ForEachGeneralRegister(
[&](Register reg, RegisterState& state) {
ValueNode* node;
RegisterMerge* merge;
if (LoadMergeState(state, &merge)) {
compiler::AllocatedOperand source = merge->operand(predecessor_id);
if (LoadMergeState(state, &node, &merge)) {
compiler::InstructionOperand source =
merge->operand(predecessor_id);
if (FLAG_code_comments) {
std::stringstream ss;
ss << "-- * " << source << " → " << reg;
__ RecordComment(ss.str());
}
RecordGapMove(source, reg, register_moves, stack_to_register_moves);
RecordGapMove(node, source, reg, register_moves, register_reloads);
}
});
if (target->has_phi()) {
Phi::List* phis = target->phis();
for (Phi* phi : *phis) {
compiler::AllocatedOperand source = compiler::AllocatedOperand::cast(
phi->input(state.block()->predecessor_id()).operand());
Input& input = phi->input(state.block()->predecessor_id());
ValueNode* node = input.node();
compiler::InstructionOperand source = input.operand();
compiler::AllocatedOperand target =
compiler::AllocatedOperand::cast(phi->result().operand());
if (FLAG_code_comments) {
@@ -289,7 +291,7 @@ class MaglevCodeGeneratingNodeProcessor {
<< graph_labeller()->NodeId(phi) << ")";
__ RecordComment(ss.str());
}
RecordGapMove(source, target, register_moves, stack_to_register_moves);
RecordGapMove(node, source, target, register_moves, register_reloads);
}
}
@@ -298,7 +300,7 @@ class MaglevCodeGeneratingNodeProcessor {
#undef EMIT_MOVE_FOR_REG
#define EMIT_MOVE_FOR_REG(Name) \
EmitStackToRegisterGapMove(stack_to_register_moves[Name.code()], Name);
EmitRegisterReload(register_reloads[Name.code()], Name);
ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
#undef EMIT_MOVE_FOR_REG
}
@@ -520,13 +522,18 @@ class MaglevCodeGeneratorImpl final {
void EmitDeoptFrameSingleValue(ValueNode* value,
const InputLocation& input_location) {
const compiler::AllocatedOperand& operand =
compiler::AllocatedOperand::cast(input_location.operand());
ValueRepresentation repr = value->properties().value_representation();
if (operand.IsRegister()) {
EmitDeoptStoreRegister(operand, repr);
if (input_location.operand().IsConstant()) {
translation_array_builder_.StoreLiteral(
GetDeoptLiteral(*value->Reify(isolate())));
} else {
EmitDeoptStoreStackSlot(operand, repr);
const compiler::AllocatedOperand& operand =
compiler::AllocatedOperand::cast(input_location.operand());
ValueRepresentation repr = value->properties().value_representation();
if (operand.IsRegister()) {
EmitDeoptStoreRegister(operand, repr);
} else {
EmitDeoptStoreStackSlot(operand, repr);
}
}
}

View File

@@ -61,6 +61,10 @@ void DefineAsRegister(MaglevVregAllocationState* vreg_state, Node* node) {
compiler::UnallocatedOperand::MUST_HAVE_REGISTER,
vreg_state->AllocateVirtualRegister());
}
void DefineAsConstant(MaglevVregAllocationState* vreg_state, Node* node) {
node->result().SetUnallocated(compiler::UnallocatedOperand::NONE,
vreg_state->AllocateVirtualRegister());
}
void DefineAsFixed(MaglevVregAllocationState* vreg_state, Node* node,
Register reg) {
@@ -98,16 +102,21 @@ void UseFixed(Input& input, DoubleRegister reg) {
// ---
void PushInput(MaglevCodeGenState* code_gen_state, const Input& input) {
// TODO(leszeks): Consider special casing the value. (Toon: could possibly
// be done through Input directly?)
const compiler::AllocatedOperand& operand =
compiler::AllocatedOperand::cast(input.operand());
if (operand.IsRegister()) {
__ Push(operand.GetRegister());
if (input.operand().IsConstant()) {
input.node()->LoadToRegister(code_gen_state, kScratchRegister);
__ Push(kScratchRegister);
} else {
DCHECK(operand.IsStackSlot());
__ Push(code_gen_state->GetStackSlot(operand));
// TODO(leszeks): Consider special casing the value. (Toon: could possibly
// be done through Input directly?)
const compiler::AllocatedOperand& operand =
compiler::AllocatedOperand::cast(input.operand());
if (operand.IsRegister()) {
__ Push(operand.GetRegister());
} else {
DCHECK(operand.IsStackSlot());
__ Push(code_gen_state->GetStackSlot(operand));
}
}
}
@@ -390,13 +399,79 @@ DeoptInfo::DeoptInfo(Zone* zone, const MaglevCompilationUnit& compilation_unit,
// ---
// Nodes
// ---
void ValueNode::LoadToRegister(MaglevCodeGenState* code_gen_state,
compiler::AllocatedOperand op) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return this->Cast<Name>()->DoLoadToRegister(code_gen_state, op);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::LoadToRegister(MaglevCodeGenState* code_gen_state,
Register reg) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return this->Cast<Name>()->DoLoadToRegister(code_gen_state, reg);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
Register reg) {
DCHECK(is_spilled());
__ movq(reg, code_gen_state->GetStackSlot(
compiler::AllocatedOperand::cast(spill_slot())));
}
Handle<Object> ValueNode::Reify(Isolate* isolate) {
switch (opcode()) {
#define V(Name) \
case Opcode::k##Name: \
return this->Cast<Name>()->DoReify(isolate);
VALUE_NODE_LIST(V)
#undef V
default:
UNREACHABLE();
}
}
void ValueNode::SetNoSpillOrHint() {
DCHECK_EQ(state_, kLastUse);
#ifdef DEBUG
state_ = kSpillOrHint;
#endif // DEBUG
if (Is<Constant>() || Is<SmiConstant>() || Is<RootConstant>() ||
Is<Int32Constant>() || Is<Float64Constant>()) {
spill_or_hint_ = compiler::ConstantOperand(
compiler::UnallocatedOperand::cast(result().operand())
.virtual_register());
} else {
spill_or_hint_ = compiler::InstructionOperand();
}
}
void SmiConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
DefineAsConstant(vreg_state, this);
}
void SmiConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
__ Move(ToRegister(result()), Immediate(value()));
const ProcessingState& state) {}
void SmiConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
compiler::AllocatedOperand op) {
DoLoadToRegister(code_gen_state, op.GetRegister());
}
Handle<Object> SmiConstant::DoReify(Isolate* isolate) {
return handle(value_, isolate);
}
void SmiConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
Register reg) {
__ Move(reg, Immediate(value()));
}
void SmiConstant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
@@ -405,11 +480,20 @@ void SmiConstant::PrintParams,
void Float64Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
DefineAsConstant(vreg_state, this);
}
void Float64Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
__ Move(ToDoubleRegister(result()), value());
const ProcessingState& state) {}
void Float64Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
compiler::AllocatedOperand op) {
DoLoadToRegister(code_gen_state, op.GetDoubleRegister());
}
Handle<Object> Float64Constant::DoReify(Isolate* isolate) {
return isolate->factory()->NewNumber(value_);
}
void Float64Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
DoubleRegister reg) {
__ Move(reg, value());
}
void Float64Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
@@ -418,12 +502,19 @@ void Float64Constant::PrintParams,
void Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
DefineAsConstant(vreg_state, this);
}
void Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
__ Move(ToRegister(result()), object_.object());
const ProcessingState& state) {}
void Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
compiler::AllocatedOperand op) {
DoLoadToRegister(code_gen_state, op.GetRegister());
}
void Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
Register reg) {
__ Move(reg, object_.object());
}
Handle<Object> Constant::DoReify(Isolate* isolate) { return object_.object(); }
void Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << object_ << ")";
@@ -493,15 +584,21 @@ void RegisterInput::PrintParams,
void RootConstant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
DefineAsConstant(vreg_state, this);
}
void RootConstant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
if (!has_valid_live_range()) return;
Register reg = ToRegister(result());
const ProcessingState& state) {}
void RootConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
compiler::AllocatedOperand op) {
DoLoadToRegister(code_gen_state, op.GetRegister());
}
void RootConstant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
Register reg) {
__ LoadRoot(reg, index());
}
Handle<Object> RootConstant::DoReify(Isolate* isolate) {
return isolate->root_handle(index());
}
void RootConstant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {
os << "(" << RootsTable::name(index()) << ")";
@@ -742,7 +839,9 @@ void GapMove::AllocateVreg(MaglevVregAllocationState* vreg_state,
}
void GapMove::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
if (source().IsRegister()) {
if (source().IsConstant()) {
node_->LoadToRegister(code_gen_state, target());
} else if (source().IsRegister()) {
Register source_reg = ToRegister(source());
if (target().IsAnyRegister()) {
DCHECK(target().IsRegister());
@@ -875,11 +974,20 @@ void CheckedSmiTag::GenerateCode(MaglevCodeGenState* code_gen_state,
void Int32Constant::AllocateVreg(MaglevVregAllocationState* vreg_state,
const ProcessingState& state) {
DefineAsRegister(vreg_state, this);
DefineAsConstant(vreg_state, this);
}
void Int32Constant::GenerateCode(MaglevCodeGenState* code_gen_state,
const ProcessingState& state) {
__ Move(ToRegister(result()), Immediate(value()));
const ProcessingState& state) {}
void Int32Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
compiler::AllocatedOperand op) {
DoLoadToRegister(code_gen_state, op.GetRegister());
}
void Int32Constant::DoLoadToRegister(MaglevCodeGenState* code_gen_state,
Register reg) {
__ Move(reg, Immediate(value()));
}
Handle<Object> Int32Constant::DoReify(Isolate* isolate) {
return isolate->factory()->NewNumber(value());
}
void Int32Constant::PrintParams(std::ostream& os,
MaglevGraphLabeller* graph_labeller) const {

View File

@@ -295,9 +295,8 @@ class ValueLocation {
}
// Only to be used on inputs that inherit allocation.
template <typename... Args>
void InjectAllocated(Args&&... args) {
operand_ = compiler::AllocatedOperand(args...);
void InjectLocation(compiler::InstructionOperand location) {
operand_ = location;
}
template <typename... Args>
@@ -703,27 +702,33 @@ class ValueNode : public Node {
return spill_or_hint_;
}
void SetNoSpillOrHint() {
DCHECK_EQ(state_, kLastUse);
#ifdef DEBUG
state_ = kSpillOrHint;
#endif // DEBUG
spill_or_hint_ = compiler::InstructionOperand();
bool is_loadable() const {
DCHECK_EQ(state_, kSpillOrHint);
return spill_or_hint_.IsConstant() || spill_or_hint_.IsAnyStackSlot();
}
bool is_spilled() const {
DCHECK_EQ(state_, kSpillOrHint);
return spill_or_hint_.IsAnyStackSlot();
}
bool is_spilled() const { return spill_or_hint_.IsAnyStackSlot(); }
void SetNoSpillOrHint();
/* For constants only. */
void LoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
void LoadToRegister(MaglevCodeGenState*, Register);
Handle<Object> Reify(Isolate* isolate);
void Spill(compiler::AllocatedOperand operand) {
#ifdef DEBUG
if (state_ == kLastUse) {
state_ = kSpillOrHint;
} else {
DCHECK(!is_spilled());
DCHECK(!is_loadable());
}
#endif // DEBUG
DCHECK(!Is<Constant>());
DCHECK(!Is<SmiConstant>());
DCHECK(!Is<RootConstant>());
DCHECK(!Is<Int32Constant>());
DCHECK(!Is<Float64Constant>());
DCHECK(operand.IsAnyStackSlot());
spill_or_hint_ = operand;
DCHECK(spill_or_hint_.IsAnyStackSlot());
@@ -811,14 +816,14 @@ class ValueNode : public Node {
return registers_with_result_ != kEmptyRegList;
}
compiler::AllocatedOperand allocation() const {
compiler::InstructionOperand allocation() const {
if (has_register()) {
return compiler::AllocatedOperand(compiler::LocationOperand::REGISTER,
GetMachineRepresentation(),
FirstRegisterCode());
}
DCHECK(is_spilled());
return compiler::AllocatedOperand::cast(spill_or_hint_);
DCHECK(is_loadable());
return spill_or_hint_;
}
protected:
@@ -844,6 +849,12 @@ class ValueNode : public Node {
return registers_with_result_.first().code();
}
void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand) {
UNREACHABLE();
}
void DoLoadToRegister(MaglevCodeGenState*, Register);
Handle<Object> DoReify(Isolate* isolate) { UNREACHABLE(); }
// Rename for better pairing with `end_id`.
NodeIdT start_id() const { return id(); }
@@ -1056,6 +1067,8 @@ class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
using Base = FixedInputValueNodeT<0, Int32Constant>;
public:
using OutputRegister = Register;
explicit Int32Constant(uint32_t bitfield, int32_t value)
: Base(bitfield), value_(value) {}
@@ -1067,6 +1080,10 @@ class Int32Constant : public FixedInputValueNodeT<0, Int32Constant> {
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
void DoLoadToRegister(MaglevCodeGenState*, Register);
Handle<Object> DoReify(Isolate* isolate);
private:
const int32_t value_;
};
@@ -1075,6 +1092,8 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
using Base = FixedInputValueNodeT<0, Float64Constant>;
public:
using OutputRegister = DoubleRegister;
explicit Float64Constant(uint32_t bitfield, double value)
: Base(bitfield), value_(value) {}
@@ -1086,6 +1105,11 @@ class Float64Constant : public FixedInputValueNodeT<0, Float64Constant> {
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
void DoLoadToRegister(MaglevCodeGenState*, Register) { UNREACHABLE(); }
void DoLoadToRegister(MaglevCodeGenState*, DoubleRegister);
Handle<Object> DoReify(Isolate* isolate);
private:
const double value_;
};
@@ -1223,6 +1247,10 @@ class SmiConstant : public FixedInputValueNodeT<0, SmiConstant> {
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
void DoLoadToRegister(MaglevCodeGenState*, Register);
Handle<Object> DoReify(Isolate* isolate);
private:
const Smi value_;
};
@@ -1238,6 +1266,10 @@ class Constant : public FixedInputValueNodeT<0, Constant> {
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
void DoLoadToRegister(MaglevCodeGenState*, Register);
Handle<Object> DoReify(Isolate* isolate);
private:
const compiler::HeapObjectRef object_;
};
@@ -1255,6 +1287,10 @@ class RootConstant : public FixedInputValueNodeT<0, RootConstant> {
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
void DoLoadToRegister(MaglevCodeGenState*, compiler::AllocatedOperand);
void DoLoadToRegister(MaglevCodeGenState*, Register);
Handle<Object> DoReify(Isolate* isolate);
private:
const RootIndex index_;
};
@@ -1519,19 +1555,22 @@ class GapMove : public FixedInputNodeT<0, GapMove> {
using Base = FixedInputNodeT<0, GapMove>;
public:
GapMove(uint32_t bitfield, compiler::AllocatedOperand source,
GapMove(uint32_t bitfield, ValueNode* node,
compiler::InstructionOperand source,
compiler::AllocatedOperand target)
: Base(bitfield), source_(source), target_(target) {}
: Base(bitfield), node_(node), source_(source), target_(target) {}
compiler::AllocatedOperand source() const { return source_; }
compiler::InstructionOperand source() const { return source_; }
compiler::AllocatedOperand target() const { return target_; }
ValueNode* node() const { return node_; }
void AllocateVreg(MaglevVregAllocationState*, const ProcessingState&);
void GenerateCode(MaglevCodeGenState*, const ProcessingState&);
void PrintParams(std::ostream&, MaglevGraphLabeller*) const;
private:
compiler::AllocatedOperand source_;
ValueNode* node_;
compiler::InstructionOperand source_;
compiler::AllocatedOperand target_;
};

View File

@@ -67,10 +67,10 @@ constexpr bool operator==(const RegisterStateFlags& left,
typedef base::PointerWithPayload<void, RegisterStateFlags, 2> RegisterState;
struct RegisterMerge {
compiler::AllocatedOperand* operands() {
return reinterpret_cast<compiler::AllocatedOperand*>(this + 1);
compiler::InstructionOperand* operands() {
return reinterpret_cast<compiler::InstructionOperand*>(this + 1);
}
compiler::AllocatedOperand& operand(size_t i) { return operands()[i]; }
compiler::InstructionOperand& operand(size_t i) { return operands()[i]; }
ValueNode* node;
};

View File

@@ -386,7 +386,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
// Skip over the result location.
if (reg == deopt_info.result_location) return;
InputLocation* input = &deopt_info.input_locations[index++];
input->InjectAllocated(node->allocation());
input->InjectLocation(node->allocation());
UpdateUse(node, input);
});
}
@@ -402,7 +402,7 @@ void StraightForwardRegisterAllocator::UpdateUse(
checkpoint_state->ForEachValue(
unit, [&](ValueNode* node, interpreter::Register reg) {
InputLocation* input = &input_locations[index++];
input->InjectAllocated(node->allocation());
input->InjectLocation(node->allocation());
UpdateUse(node, input);
});
}
@@ -495,10 +495,15 @@ void StraightForwardRegisterAllocator::AllocateNodeResult(ValueNode* node) {
break;
}
case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
case compiler::UnallocatedOperand::NONE:
DCHECK(node->Is<Constant>() || node->Is<RootConstant>() ||
node->Is<SmiConstant>() || node->Is<Int32Constant>() ||
node->Is<Float64Constant>());
break;
case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
UNREACHABLE();
}
@@ -527,8 +532,9 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
// Remove the register from the node's list.
node->RemoveRegister(reg);
// Return if the removed value already has another register or is spilled.
if (node->has_register() || node->is_spilled()) return;
// Return if the removed value already has another register or is loadable
// from memory.
if (node->has_register() || node->is_loadable()) return;
// If we are at the end of the current node, and the last use of the given
// node is the current node, allow it to be dropped.
@@ -551,7 +557,7 @@ void StraightForwardRegisterAllocator::DropRegisterValue(
<< "gap move: " << PrintNodeLabel(graph_labeller(), node) << ": "
<< target << " ← " << source << std::endl;
}
AddMoveBeforeCurrentNode(source, target);
AddMoveBeforeCurrentNode(node, source, target);
return;
}
@@ -607,7 +613,7 @@ void StraightForwardRegisterAllocator::AllocateControlNode(ControlNode* node,
Phi::List* phis = target->phis();
for (Phi* phi : *phis) {
Input& input = phi->input(block->predecessor_id());
input.InjectAllocated(input.node()->allocation());
input.InjectLocation(input.node()->allocation());
}
for (Phi* phi : *phis) UpdateUse(&phi->input(block->predecessor_id()));
}
@@ -661,9 +667,10 @@ void StraightForwardRegisterAllocator::TryAllocateToInput(Phi* phi) {
}
void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
compiler::AllocatedOperand source, compiler::AllocatedOperand target) {
ValueNode* node, compiler::InstructionOperand source,
compiler::AllocatedOperand target) {
GapMove* gap_move =
Node::New<GapMove>(compilation_info_->zone(), {}, source, target);
Node::New<GapMove>(compilation_info_->zone(), {}, node, source, target);
if (compilation_info_->has_graph_labeller()) {
graph_labeller()->RegisterNode(gap_move);
}
@@ -678,7 +685,7 @@ void StraightForwardRegisterAllocator::AddMoveBeforeCurrentNode(
}
void StraightForwardRegisterAllocator::Spill(ValueNode* node) {
if (node->is_spilled()) return;
if (node->is_loadable()) return;
AllocateSpillSlot(node);
if (FLAG_trace_maglev_regalloc) {
printing_visitor_->os()
@@ -691,13 +698,12 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
compiler::UnallocatedOperand operand =
compiler::UnallocatedOperand::cast(input.operand());
ValueNode* node = input.node();
compiler::AllocatedOperand location = node->allocation();
compiler::InstructionOperand location = node->allocation();
switch (operand.extended_policy()) {
case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
case compiler::UnallocatedOperand::REGISTER_OR_SLOT_OR_CONSTANT:
input.SetAllocated(location);
break;
input.InjectLocation(location);
return;
case compiler::UnallocatedOperand::FIXED_REGISTER: {
Register reg = Register::from_code(operand.fixed_register_index());
@ -707,7 +713,7 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
case compiler::UnallocatedOperand::MUST_HAVE_REGISTER:
if (location.IsAnyRegister()) {
input.SetAllocated(location);
input.SetAllocated(compiler::AllocatedOperand::cast(location));
} else {
input.SetAllocated(AllocateRegister(node, AllocationStage::kAtStart));
}
@ -720,6 +726,7 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
break;
}
case compiler::UnallocatedOperand::REGISTER_OR_SLOT:
case compiler::UnallocatedOperand::SAME_AS_INPUT:
case compiler::UnallocatedOperand::NONE:
case compiler::UnallocatedOperand::MUST_HAVE_SLOT:
@ -733,7 +740,7 @@ void StraightForwardRegisterAllocator::AssignInput(Input& input) {
printing_visitor_->os()
<< "gap move: " << allocated << " ← " << location << std::endl;
}
AddMoveBeforeCurrentNode(location, allocated);
AddMoveBeforeCurrentNode(node, location, allocated);
}
}
@@ -761,7 +768,7 @@ void StraightForwardRegisterAllocator::SpillAndClearRegisters() {
}
void StraightForwardRegisterAllocator::AllocateSpillSlot(ValueNode* node) {
DCHECK(!node->is_spilled());
DCHECK(!node->is_loadable());
uint32_t free_slot;
bool is_tagged = (node->properties().value_representation() ==
ValueRepresentation::kTagged);
@@ -1062,14 +1069,14 @@ void StraightForwardRegisterAllocator::MergeRegisterValues(ControlNode* control,
// If there's a value in the incoming state, that value is either
// already spilled or in another place in the merge state.
if (incoming != nullptr && incoming->is_spilled()) {
if (incoming != nullptr && incoming->is_loadable()) {
EnsureInRegister(target_state, incoming);
}
return;
}
DCHECK_IMPLIES(node == nullptr, incoming != nullptr);
if (node == nullptr && !incoming->is_spilled()) {
if (node == nullptr && !incoming->is_loadable()) {
// If the register is unallocated at the merge point, and the incoming
// value isn't spilled, that means we must have seen it already in a
// different register.

View File

@@ -133,7 +133,8 @@ class StraightForwardRegisterAllocator {
void AssignTemporaries(NodeBase* node);
void TryAllocateToInput(Phi* phi);
void AddMoveBeforeCurrentNode(compiler::AllocatedOperand source,
void AddMoveBeforeCurrentNode(ValueNode* node,
compiler::InstructionOperand source,
compiler::AllocatedOperand target);
void AllocateSpillSlot(ValueNode* node);