[TurboProp] Add support for spill slot allocation to fast reg alloc

Adds support for tracking the instruction range of spilled operands,
and then allocating spill slots to these ranges. It also adds some
unittests covering spill slot allocation.

Spill slots are allocated in a linear fashion, walking the instruction
stream in order and ensuring that no spill operand is assigned to a
spill slot that is already in use anywhere within that operand's
start/end range. This isn't optimal, since it doesn't take into
account holes in these ranges (e.g., blocks between start and end that
aren't dominated by the start), but in practice it rarely costs more
than one extra spill slot compared to the current allocator.

BUG=v8:9684

Change-Id: Iedee7bcf552080e5b4b6a2f4e96b78b6c1396cab
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2297470
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69107}

5 changed files with 521 additions and 11 deletions

View File

@@ -36,6 +36,8 @@ MidTierRegisterAllocationData::MidTierRegisterAllocationData(
config_(config),
virtual_register_data_(code->VirtualRegisterCount(), allocation_zone()),
reference_map_instructions_(allocation_zone()),
spilled_virtual_registers_(code->VirtualRegisterCount(),
allocation_zone()),
tick_counter_(tick_counter) {}
MoveOperands* MidTierRegisterAllocationData::AddGapMove(
@@ -46,6 +48,11 @@ MoveOperands* MidTierRegisterAllocationData::AddGapMove(
return moves->AddMove(from, to);
}
MoveOperands* MidTierRegisterAllocationData::AddPendingOperandGapMove(
int instr_index, Instruction::GapPosition position) {
return AddGapMove(instr_index, position, PendingOperand(), PendingOperand());
}
MachineRepresentation MidTierRegisterAllocationData::RepresentationFor(
int virtual_register) {
if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
@@ -106,6 +113,33 @@ class RegisterIndex final {
int8_t index_;
};
// A Range from [start, end] of instructions, inclusive of start and end.
class Range {
public:
Range() : start_(kMaxInt), end_(0) {}
Range(int start, int end) : start_(start), end_(end) {}
void AddInstr(int index) {
start_ = std::min(start_, index);
end_ = std::max(end_, index);
}
void AddRange(const Range& other) {
start_ = std::min(start_, other.start_);
end_ = std::max(end_, other.end_);
}
// Returns true if index is greater than or equal to start and less than or
// equal to end.
bool Contains(int index) { return index >= start_ && index <= end_; }
int start() const { return start_; }
int end() const { return end_; }
private:
int start_;
int end_;
};
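// As a quick illustration of the Range bookkeeping (hypothetical values,
// not part of the patch):
//   Range r(5, 5);              // the single instruction 5
//   r.AddInstr(9);              // [5, 9]
//   r.AddInstr(3);              // [3, 9]
//   r.AddRange(Range(12, 14));  // [3, 14]
//   r.Contains(10);             // true: 3 <= 10 <= 14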
// VirtualRegisterData stores data specific to a particular virtual register,
// and tracks spilled operands for that virtual register.
class VirtualRegisterData final {
@@ -154,6 +188,9 @@ class VirtualRegisterData final {
}
bool NeedsSpillAtOutput() const;
// Allocates pending spill operands to the |allocated| spill slot.
void AllocatePendingSpillOperand(const AllocatedOperand& allocated);
int vreg() const { return vreg_; }
int output_instr_index() const { return output_instr_index_; }
bool is_constant() const { return is_constant_; }
@@ -161,11 +198,58 @@ class VirtualRegisterData final {
bool is_phi() const { return is_phi_; }
void set_is_phi(bool value) { is_phi_ = value; }
// Represents the range of instructions for which this virtual register needs
// to be spilled on the stack.
class SpillRange : public ZoneObject {
public:
// Defines a spill range for an output operand.
SpillRange(int definition_instr_index, MidTierRegisterAllocationData* data)
: live_range_(definition_instr_index, definition_instr_index) {}
// Defines a spill range for a Phi variable.
SpillRange(const InstructionBlock* phi_block,
MidTierRegisterAllocationData* data)
: live_range_(phi_block->first_instruction_index(),
phi_block->first_instruction_index()) {
// For phis, add the gap move instructions in the predecessor blocks to
// the live range.
for (RpoNumber pred_rpo : phi_block->predecessors()) {
const InstructionBlock* block = data->GetBlock(pred_rpo);
live_range_.AddInstr(block->last_instruction_index());
}
}
bool IsLiveAt(int instr_index, InstructionBlock* block) {
// TODO(rmcilroy): Only include basic blocks dominated by the variable.
return live_range_.Contains(instr_index);
}
void ExtendRangeTo(int instr_index) { live_range_.AddInstr(instr_index); }
Range& live_range() { return live_range_; }
private:
Range live_range_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
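// Worked example for the phi constructor above (hypothetical block layout):
// a phi in a block whose first instruction is 20, with predecessors ending
// at instructions 12 and 18, starts with live_range_ == [20, 20] and is
// widened by AddInstr to [12, 20], so the predecessor-end gap moves that
// write the phi's value fall inside the range.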
bool HasSpillRange() const { return spill_range_ != nullptr; }
SpillRange* spill_range() const {
DCHECK(HasSpillRange());
return spill_range_;
}
private:
void Initialize(int virtual_register, InstructionOperand* spill_operand,
int instr_index, bool is_phi, bool is_constant);
void AddPendingSpillOperand(PendingOperand* pending_operand);
void AddSpillUse(int instr_index, MidTierRegisterAllocationData* data);
void EnsureSpillRange(MidTierRegisterAllocationData* data);
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
int output_instr_index_;
int vreg_;
@@ -186,6 +270,7 @@ void VirtualRegisterData::Initialize(int virtual_register,
bool is_constant) {
vreg_ = virtual_register;
spill_operand_ = spill_operand;
spill_range_ = nullptr;
output_instr_index_ = instr_index;
is_phi_ = is_phi;
is_constant_ = is_constant;
@@ -211,14 +296,42 @@ void VirtualRegisterData::DefineAsPhi(int virtual_register, int instr_index) {
Initialize(virtual_register, nullptr, instr_index, true, false);
}
void VirtualRegisterData::EnsureSpillRange(
MidTierRegisterAllocationData* data) {
DCHECK(!is_constant());
if (HasSpillRange()) return;
if (is_phi()) {
// Define a spill range that starts at the phi's block.
const InstructionBlock* definition_block =
data->code()->InstructionAt(output_instr_index_)->block();
spill_range_ =
data->allocation_zone()->New<SpillRange>(definition_block, data);
} else {
// The spill slot will be defined after the instruction that outputs it.
spill_range_ =
data->allocation_zone()->New<SpillRange>(output_instr_index_ + 1, data);
}
data->spilled_virtual_registers().Add(vreg());
}
void VirtualRegisterData::AddSpillUse(int instr_index,
MidTierRegisterAllocationData* data) {
if (is_constant()) return;
EnsureSpillRange(data);
spill_range_->ExtendRangeTo(instr_index);
}
void VirtualRegisterData::SpillOperand(InstructionOperand* operand,
int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
InstructionOperand::ReplaceWith(operand, spill_operand());
} else {
-// TODO(rmcilroy): Implement.
-UNREACHABLE();
PendingOperand pending_op;
InstructionOperand::ReplaceWith(operand, &pending_op);
AddPendingSpillOperand(PendingOperand::cast(operand));
}
}
@@ -229,25 +342,31 @@ bool VirtualRegisterData::NeedsSpillAtOutput() const {
void VirtualRegisterData::EmitGapMoveToInputFromSpillSlot(
AllocatedOperand to_operand, int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
DCHECK(!to_operand.IsPending());
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
data->AddGapMove(instr_index, Instruction::END, *spill_operand(),
to_operand);
} else {
-// TODO(rmcilroy): Implement.
-UNREACHABLE();
MoveOperands* move_ops =
data->AddPendingOperandGapMove(instr_index, Instruction::END);
AddPendingSpillOperand(PendingOperand::cast(&move_ops->source()));
InstructionOperand::ReplaceWith(&move_ops->destination(), &to_operand);
}
}
void VirtualRegisterData::EmitGapMoveToSpillSlot(
AllocatedOperand from_operand, int instr_index,
MidTierRegisterAllocationData* data) {
AddSpillUse(instr_index, data);
if (HasAllocatedSpillOperand() || HasConstantSpillOperand()) {
data->AddGapMove(instr_index, Instruction::START, from_operand,
*spill_operand());
} else {
-// TODO(rmcilroy): Implement.
-UNREACHABLE();
MoveOperands* move_ops =
data->AddPendingOperandGapMove(instr_index, Instruction::START);
InstructionOperand::ReplaceWith(&move_ops->source(), &from_operand);
AddPendingSpillOperand(PendingOperand::cast(&move_ops->destination()));
}
}
@@ -269,6 +388,26 @@ void VirtualRegisterData::EmitGapMoveFromOutputToSpillSlot(
}
}
void VirtualRegisterData::AddPendingSpillOperand(PendingOperand* pending_op) {
DCHECK(HasSpillRange());
DCHECK_NULL(pending_op->next());
if (HasSpillOperand()) {
pending_op->set_next(PendingOperand::cast(spill_operand()));
}
spill_operand_ = pending_op;
}
void VirtualRegisterData::AllocatePendingSpillOperand(
const AllocatedOperand& allocated) {
DCHECK(!HasAllocatedSpillOperand() && !HasConstantSpillOperand());
PendingOperand* current = PendingOperand::cast(spill_operand_);
while (current) {
PendingOperand* next = current->next();
InstructionOperand::ReplaceWith(current, &allocated);
current = next;
}
}
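// Illustration of the pending-operand chain (hypothetical, most recent
// first): three spill uses of a virtual register each create a
// PendingOperand, and AddPendingSpillOperand threads them through
// spill_operand_:
//   spill_operand_ -> pending_3 -> pending_2 -> pending_1 -> nullptr
// Once the spill slot allocator picks a slot, AllocatePendingSpillOperand
// walks the chain and rewrites every pending operand in place to the same
// stack-slot AllocatedOperand.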
// RegisterState represents the state of the |kind| registers at a particular
// point in program execution. The RegisterState can be cloned or merged with
// other RegisterStates to model branches and merges in program control flow.
@@ -1381,6 +1520,8 @@ void MidTierRegisterAllocator::AllocateRegisters() {
AllocateRegisters(block);
}
UpdateSpillRangesForLoops();
data()->frame()->SetAllocatedRegisters(
general_reg_allocator().assigned_registers());
data()->frame()->SetAllocatedDoubleRegisters(
@@ -1541,6 +1682,140 @@ void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
}
}
void MidTierRegisterAllocator::UpdateSpillRangesForLoops() {
// Extend the spill range of any spill that crosses a loop header to
// the full loop.
for (InstructionBlock* block : code()->instruction_blocks()) {
if (block->IsLoopHeader()) {
RpoNumber last_loop_block =
RpoNumber::FromInt(block->loop_end().ToInt() - 1);
int last_loop_instr =
data()->GetBlock(last_loop_block)->last_instruction_index();
// Extend spill range for all spilled values that are live on entry to the
// loop header.
BitVector::Iterator iterator(&data()->spilled_virtual_registers());
for (; !iterator.Done(); iterator.Advance()) {
const VirtualRegisterData& vreg_data =
VirtualRegisterDataFor(iterator.Current());
if (vreg_data.HasSpillRange() &&
vreg_data.spill_range()->IsLiveAt(block->first_instruction_index(),
block)) {
vreg_data.spill_range()->ExtendRangeTo(last_loop_instr);
}
}
}
}
}
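// Worked example (hypothetical indices): a loop header's block starts at
// instruction 30 and the last block of the loop ends at instruction 55. A
// value spilled over [25, 34] is live on entry to the header (30 lies in
// [25, 34]), so its range is extended to [25, 55]; this prevents its slot
// from being reused inside the loop and clobbered on the back edge.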
class MidTierSpillSlotAllocator::SpillSlot : public ZoneObject {
public:
SpillSlot(int stack_slot, int byte_width)
: stack_slot_(stack_slot), byte_width_(byte_width), range_() {}
void AddRange(const Range& range) { range_.AddRange(range); }
AllocatedOperand ToOperand(MachineRepresentation rep) const {
return AllocatedOperand(AllocatedOperand::STACK_SLOT, rep, stack_slot_);
}
int byte_width() const { return byte_width_; }
int last_use() const { return range_.end(); }
private:
int stack_slot_;
int byte_width_;
Range range_;
DISALLOW_COPY_AND_ASSIGN(SpillSlot);
};
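// Note: the greater-than comparison below gives the priority queue min-heap
// behaviour, so allocated_slots_.top() is always the slot whose last use
// ends earliest.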
bool MidTierSpillSlotAllocator::OrderByLastUse::operator()(
const SpillSlot* a, const SpillSlot* b) const {
return a->last_use() > b->last_use();
}
MidTierSpillSlotAllocator::MidTierSpillSlotAllocator(
MidTierRegisterAllocationData* data)
: data_(data),
allocated_slots_(data->allocation_zone()),
free_slots_(data->allocation_zone()),
position_(0) {}
void MidTierSpillSlotAllocator::AdvanceTo(int instr_index) {
// Move any slots that are no longer in use to the free slots list.
DCHECK_LE(position_, instr_index);
while (!allocated_slots_.empty() &&
instr_index > allocated_slots_.top()->last_use()) {
free_slots_.push_front(allocated_slots_.top());
allocated_slots_.pop();
}
position_ = instr_index;
}
MidTierSpillSlotAllocator::SpillSlot*
MidTierSpillSlotAllocator::GetFreeSpillSlot(int byte_width) {
for (auto it = free_slots_.begin(); it != free_slots_.end(); ++it) {
SpillSlot* slot = *it;
if (slot->byte_width() == byte_width) {
free_slots_.erase(it);
return slot;
}
}
return nullptr;
}
void MidTierSpillSlotAllocator::Allocate(
VirtualRegisterData* virtual_register) {
DCHECK(virtual_register->HasPendingSpillOperand());
VirtualRegisterData::SpillRange* spill_range =
virtual_register->spill_range();
MachineRepresentation rep =
data()->RepresentationFor(virtual_register->vreg());
int byte_width = ByteWidthForStackSlot(rep);
Range live_range = spill_range->live_range();
AdvanceTo(live_range.start());
// Try to re-use an existing free spill slot.
SpillSlot* slot = GetFreeSpillSlot(byte_width);
if (slot == nullptr) {
// Otherwise allocate a new slot.
int stack_slot = frame()->AllocateSpillSlot(byte_width);
slot = zone()->New<SpillSlot>(stack_slot, byte_width);
}
// Extend the range of the slot to include this spill range, and allocate the
// pending spill operands with this slot.
slot->AddRange(live_range);
virtual_register->AllocatePendingSpillOperand(slot->ToOperand(rep));
allocated_slots_.push(slot);
}
void MidTierSpillSlotAllocator::AllocateSpillSlots() {
ZoneVector<VirtualRegisterData*> spilled(zone());
BitVector::Iterator iterator(&data()->spilled_virtual_registers());
for (; !iterator.Done(); iterator.Advance()) {
VirtualRegisterData& vreg_data =
data()->VirtualRegisterDataFor(iterator.Current());
if (vreg_data.HasPendingSpillOperand()) {
spilled.push_back(&vreg_data);
}
}
// Sort the spill ranges by order of their first use to enable linear
// allocation of spill slots.
std::sort(spilled.begin(), spilled.end(),
[](const VirtualRegisterData* a, const VirtualRegisterData* b) {
return a->spill_range()->live_range().start() <
b->spill_range()->live_range().start();
});
// Allocate a spill slot for each virtual register with a spill range.
for (VirtualRegisterData* spill : spilled) {
Allocate(spill);
}
}
} // namespace compiler
} // namespace internal
} // namespace v8

View File

@@ -48,6 +48,10 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
const InstructionOperand& from,
const InstructionOperand& to);
// Adds a gap move where both sides are PendingOperand operands.
MoveOperands* AddPendingOperandGapMove(int instr_index,
Instruction::GapPosition position);
// Helpers to get a block from an |rpo_number| or |instr_index|.
const InstructionBlock* GetBlock(const RpoNumber rpo_number);
const InstructionBlock* GetBlock(int instr_index);
@@ -57,6 +61,9 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
return reference_map_instructions_;
}
// Returns a bitvector representing the virtual registers that were spilled.
BitVector& spilled_virtual_registers() { return spilled_virtual_registers_; }
// This zone is for data structures only needed during register allocation
// phases.
Zone* allocation_zone() const { return allocation_zone_; }
@@ -80,6 +87,7 @@ class MidTierRegisterAllocationData final : public RegisterAllocationData {
ZoneVector<VirtualRegisterData> virtual_register_data_;
ZoneVector<int> reference_map_instructions_;
BitVector spilled_virtual_registers_;
TickCounter* const tick_counter_;
@@ -105,6 +113,7 @@ class MidTierRegisterAllocator final {
// Allocate registers operations.
void AllocateRegisters(const InstructionBlock* block);
void UpdateSpillRangesForLoops();
bool IsFixedRegisterPolicy(const UnallocatedOperand* operand);
void ReserveFixedRegisters(int instr_index);
@@ -137,6 +146,39 @@ class MidTierRegisterAllocator final {
DISALLOW_COPY_AND_ASSIGN(MidTierRegisterAllocator);
};
// Spill slot allocator for mid-tier register allocation.
class MidTierSpillSlotAllocator final {
public:
explicit MidTierSpillSlotAllocator(MidTierRegisterAllocationData* data);
// Phase 3: assign spilled operands to specific spill slots.
void AllocateSpillSlots();
private:
class SpillSlot;
void Allocate(VirtualRegisterData* virtual_register);
void AdvanceTo(int instr_index);
SpillSlot* GetFreeSpillSlot(int byte_width);
MidTierRegisterAllocationData* data() const { return data_; }
InstructionSequence* code() const { return data()->code(); }
Frame* frame() const { return data()->frame(); }
Zone* zone() const { return data()->allocation_zone(); }
struct OrderByLastUse {
bool operator()(const SpillSlot* a, const SpillSlot* b) const;
};
MidTierRegisterAllocationData* data_;
ZonePriorityQueue<SpillSlot*, OrderByLastUse> allocated_slots_;
ZoneLinkedList<SpillSlot*> free_slots_;
int position_;
DISALLOW_COPY_AND_ASSIGN(MidTierSpillSlotAllocator);
};
} // namespace compiler
} // namespace internal
} // namespace v8

View File

@@ -2291,6 +2291,16 @@ struct MidTierRegisterAllocatorPhase {
}
};
struct MidTierSpillSlotAllocatorPhase {
DECL_PIPELINE_PHASE_CONSTANTS(MidTierSpillSlotAllocator)
void Run(PipelineData* data, Zone* temp_zone) {
MidTierSpillSlotAllocator spill_allocator(
data->mid_tier_register_allocator_data());
spill_allocator.AllocateSpillSlots();
}
};
struct OptimizeMovesPhase {
DECL_PIPELINE_PHASE_CONSTANTS(OptimizeMoves)
@@ -3659,8 +3669,9 @@ void PipelineImpl::AllocateRegistersForMidTier(
Run<MidTierRegisterAllocatorPhase>();
-// TODO(rmcilroy): Run spill slot allocation and reference map population
-// phases
Run<MidTierSpillSlotAllocatorPhase>();
// TODO(rmcilroy): Run reference map population phase.
TraceSequence(info(), data, "after register allocation");

View File

@@ -908,6 +908,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EffectLinearization) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, EscapeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierSpillSlotAllocator) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FinalizeCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \

View File

@@ -12,7 +12,7 @@ namespace compiler {
namespace {
-class FastRegisterAllocatorTest : public InstructionSequenceTest {
class MidTierRegisterAllocatorTest : public InstructionSequenceTest {
public:
void Allocate() {
WireBlocks();
@@ -20,7 +20,7 @@ class FastRegisterAllocatorTest : public InstructionSequenceTest {
}
};
-TEST_F(FastRegisterAllocatorTest, CanAllocateThreeRegisters) {
TEST_F(MidTierRegisterAllocatorTest, CanAllocateThreeRegisters) {
// return p0 + p1;
StartBlock();
auto a_reg = Parameter();
@@ -32,7 +32,7 @@ TEST_F(FastRegisterAllocatorTest, CanAllocateThreeRegisters) {
Allocate();
}
-TEST_F(FastRegisterAllocatorTest, CanAllocateFPRegisters) {
TEST_F(MidTierRegisterAllocatorTest, CanAllocateFPRegisters) {
StartBlock();
TestOperand inputs[] = {
Reg(FPParameter(kFloat64)), Reg(FPParameter(kFloat64)),
@@ -45,6 +45,187 @@ TEST_F(FastRegisterAllocatorTest, CanAllocateFPRegisters) {
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, MoveLotsOfConstants) {
StartBlock();
VReg constants[Register::kNumRegisters];
for (size_t i = 0; i < arraysize(constants); ++i) {
constants[i] = DefineConstant();
}
TestOperand call_ops[Register::kNumRegisters * 2];
for (int i = 0; i < Register::kNumRegisters; ++i) {
call_ops[i] = Reg(constants[i], i);
}
for (int i = 0; i < Register::kNumRegisters; ++i) {
call_ops[i + Register::kNumRegisters] = Slot(constants[i], i);
}
EmitCall(Slot(-1), arraysize(call_ops), call_ops);
EndBlock(Last());
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SplitBeforeInstruction) {
const int kNumRegs = 6;
SetNumRegs(kNumRegs, kNumRegs);
StartBlock();
// Stack parameters/spilled values.
auto p_0 = Define(Slot(-1));
auto p_1 = Define(Slot(-2));
// Fill registers.
VReg values[kNumRegs];
for (size_t i = 0; i < arraysize(values); ++i) {
values[i] = Define(Reg(static_cast<int>(i)));
}
// values[0] will be split in the second half of this instruction.
// Models Intel mod instructions.
EmitOI(Reg(0), Reg(p_0, 1), UniqueReg(p_1));
EmitI(Reg(values[0], 0));
EndBlock(Last());
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SplitBeforeInstruction2) {
const int kNumRegs = 6;
SetNumRegs(kNumRegs, kNumRegs);
StartBlock();
// Stack parameters/spilled values.
auto p_0 = Define(Slot(-1));
auto p_1 = Define(Slot(-2));
// Fill registers.
VReg values[kNumRegs];
for (size_t i = 0; i < arraysize(values); ++i) {
values[i] = Define(Reg(static_cast<int>(i)));
}
// values[0] and [1] will be split in the second half of this instruction.
EmitOOI(Reg(0), Reg(1), Reg(p_0, 0), Reg(p_1, 1));
EmitI(Reg(values[0]), Reg(values[1]));
EndBlock(Last());
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SplitBeforeAndMove) {
StartBlock();
// Fill registers.
VReg values[Register::kNumRegisters];
for (size_t i = 0; i < arraysize(values); ++i) {
if (i == 0 || i == 1) continue; // Leave a hole for c_1 to take.
values[i] = Define(Reg(static_cast<int>(i)));
}
auto c_0 = DefineConstant();
auto c_1 = DefineConstant();
EmitOI(Reg(1), Reg(c_0, 0), UniqueReg(c_1));
// Use previous values to force c_1 to split before the previous instruction.
for (size_t i = 0; i < arraysize(values); ++i) {
if (i == 0 || i == 1) continue;
EmitI(Reg(values[i], static_cast<int>(i)));
}
EndBlock(Last());
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SpillTwice) {
StartBlock();
auto p_0 = Parameter(Reg(1));
EmitCall(Slot(-2), Unique(p_0), Reg(p_0, 1));
EndBlock(Last());
Allocate();
}
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };
const ParameterType kParameterTypes[] = {
ParameterType::kFixedSlot, ParameterType::kSlot, ParameterType::kRegister,
ParameterType::kFixedRegister};
class MidTierRegAllocSlotConstraintTest
: public MidTierRegisterAllocatorTest,
public ::testing::WithParamInterface<
::testing::tuple<ParameterType, int>> {
public:
static const int kMaxVariant = 5;
protected:
ParameterType parameter_type() const {
return ::testing::get<0>(B::GetParam());
}
int variant() const { return ::testing::get<1>(B::GetParam()); }
private:
using B = ::testing::WithParamInterface<::testing::tuple<ParameterType, int>>;
};
} // namespace
TEST_P(MidTierRegAllocSlotConstraintTest, SlotConstraint) {
StartBlock();
VReg p_0;
switch (parameter_type()) {
case ParameterType::kFixedSlot:
p_0 = Parameter(Slot(-1));
break;
case ParameterType::kSlot:
p_0 = Parameter(Slot(-1));
break;
case ParameterType::kRegister:
p_0 = Parameter(Reg());
break;
case ParameterType::kFixedRegister:
p_0 = Parameter(Reg(1));
break;
}
switch (variant()) {
case 0:
EmitI(Slot(p_0), Reg(p_0));
break;
case 1:
EmitI(Slot(p_0));
break;
case 2:
EmitI(Reg(p_0));
EmitI(Slot(p_0));
break;
case 3:
EmitI(Slot(p_0));
EmitI(Reg(p_0));
break;
case 4:
EmitI(Slot(p_0, -1), Slot(p_0), Reg(p_0), Reg(p_0, 1));
break;
default:
UNREACHABLE();
}
EndBlock(Last());
Allocate();
}
INSTANTIATE_TEST_SUITE_P(
MidTierRegisterAllocatorTest, MidTierRegAllocSlotConstraintTest,
::testing::Combine(
::testing::ValuesIn(kParameterTypes),
::testing::Range(0, MidTierRegAllocSlotConstraintTest::kMaxVariant)));
} // namespace
} // namespace compiler
} // namespace internal
} // namespace v8