[TurboProp] Add support for Phis to fast reg alloc.

Adds support for allocating Phis in the fast register allocator.
Registers used for Phis are marked specially between the point where
the Phi is defined and the gap moves in the predecessor blocks which
populate the Phi's value, since if the Phi is spilled, all predecessor
blocks must also spill it, even if they were already allocated.
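
(Editor's illustration, not part of the commit.) A minimal standalone C++
sketch of that invariant, using simplified stand-in types rather than V8's:
allocation visits blocks in reverse order, so a predecessor with a higher
RPO number than the current block has already been allocated and must be
patched retroactively when the Phi spills.

// Standalone sketch; Block and the printf "emit" are stand-ins, not V8 code.
#include <cstdio>
#include <vector>

struct Block {
  int rpo;         // Reverse-postorder number of the block.
  int last_instr;  // Index of the block's last instruction.
};

// Models RegisterState::Register::SpillPhiGapMove: when a register holding
// a phi is spilled while allocating |current|, every already-allocated
// predecessor of the phi's block must also spill the phi.
void SpillPhiGapMoveSketch(const Block& current,
                           const std::vector<Block>& phi_predecessors) {
  for (const Block& pred : phi_predecessors) {
    // Higher rpo number => already allocated (reverse allocation order).
    if (pred.rpo > current.rpo) {
      std::printf("add spill gap move at instruction %d\n", pred.last_instr);
    }
  }
}

int main() {
  // Diamond: B1 and B2 both feed a phi; B2 was allocated before B1.
  Block b1{1, 10}, b2{2, 20};
  SpillPhiGapMoveSketch(b1, {b1, b2});  // Spilling while in B1 patches B2.
  return 0;
}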

BUG=v8:9684

Change-Id: Iebe90495b83df655d3335a7d55874123f3b27f8d
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2299366
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69139}

@@ -28,14 +28,23 @@ class RegisterState;
class BlockState final {
public:
BlockState(int block_count, Zone* zone)
-    : dominated_blocks_(block_count, zone) {}
+    : dominated_blocks_(block_count, zone), successors_phi_index_(-1) {}
// Returns a bitvector representing all the basic blocks that are dominated
// by this basic block.
BitVector* dominated_blocks() { return &dominated_blocks_; }
// Set / get this block's index for its successors' phi operations. Returns
// -1 if this block has no successors with phi operations.
int successors_phi_index() const { return successors_phi_index_; }
void set_successors_phi_index(int index) {
DCHECK_EQ(successors_phi_index_, -1);
successors_phi_index_ = index;
}
private:
BitVector dominated_blocks_;
int successors_phi_index_;
};
MidTierRegisterAllocationData::MidTierRegisterAllocationData(
@@ -462,10 +471,10 @@ class RegisterState final : public ZoneObject {
// Commit the |reg| with the |allocated| operand.
void Commit(RegisterIndex reg, AllocatedOperand allocated,
InstructionOperand* operand, MidTierRegisterAllocationData* data);
-// Spill the contents of |reg| using the |allocated| operand to commit the
-// spill gap move.
+// Spill the contents of |reg| for an instruction in |current_block| using
+// the |allocated| operand to commit the spill gap move.
void Spill(RegisterIndex reg, AllocatedOperand allocated,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
// Allocate |reg| to |virtual_register| for the instruction at |instr_index|.
@@ -508,6 +517,7 @@ class RegisterState final : public ZoneObject {
// Operations for committing, spilling and allocating uses of the register.
void Commit(AllocatedOperand allocated_operand);
void Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data);
void Use(int virtual_register, int instr_index);
void PendingUse(InstructionOperand* operand, int virtual_register,
@@ -517,6 +527,10 @@ class RegisterState final : public ZoneObject {
return virtual_register_ != InstructionOperand::kInvalidVirtualRegister;
}
// Mark register as holding a phi.
void MarkAsPhiMove();
bool is_phi_gap_move() const { return is_phi_gap_move_; }
// The current virtual register held by this register.
int virtual_register() const { return virtual_register_; }
@@ -537,8 +551,12 @@ class RegisterState final : public ZoneObject {
private:
void SpillPendingUses(MidTierRegisterAllocationData* data);
void SpillPhiGapMove(AllocatedOperand allocated_op,
const InstructionBlock* block,
MidTierRegisterAllocationData* data);
bool needs_gap_move_on_spill_;
bool is_phi_gap_move_;
int last_use_instr_index_;
int virtual_register_;
PendingOperand* pending_uses_;
@@ -562,6 +580,7 @@ class RegisterState final : public ZoneObject {
RegisterState::Register::Register() { Reset(); }
void RegisterState::Register::Reset() {
is_phi_gap_move_ = false;
needs_gap_move_on_spill_ = false;
last_use_instr_index_ = -1;
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
@@ -593,6 +612,11 @@ void RegisterState::Register::PendingUse(InstructionOperand* operand,
pending_uses_ = PendingOperand::cast(operand);
}
void RegisterState::Register::MarkAsPhiMove() {
DCHECK(is_allocated());
is_phi_gap_move_ = true;
}
void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
DCHECK(is_allocated());
@@ -607,7 +631,11 @@ void RegisterState::Register::Commit(AllocatedOperand allocated_op) {
}
void RegisterState::Register::Spill(AllocatedOperand allocated_op,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data) {
if (is_phi_gap_move()) {
SpillPhiGapMove(allocated_op, current_block, data);
}
if (needs_gap_move_on_spill()) {
VirtualRegisterData& vreg_data =
data->VirtualRegisterDataFor(virtual_register());
@@ -618,6 +646,28 @@ void RegisterState::Register::Spill(AllocatedOperand allocated_op,
virtual_register_ = InstructionOperand::kInvalidVirtualRegister;
}
void RegisterState::Register::SpillPhiGapMove(
AllocatedOperand allocated_op, const InstructionBlock* current_block,
MidTierRegisterAllocationData* data) {
DCHECK_EQ(current_block->SuccessorCount(), 1);
const InstructionBlock* phi_block =
data->GetBlock(current_block->successors()[0]);
// Add gap moves to the spilled phi for all blocks we previously allocated
// assuming the phi was in a register.
VirtualRegisterData& vreg_data =
data->VirtualRegisterDataFor(virtual_register());
for (RpoNumber predecessor : phi_block->predecessors()) {
// If the predecessor has a higher rpo number than the current block, then
// we have already processed it (blocks are allocated in reverse order), so
// add the required gap move.
if (predecessor > current_block->rpo_number()) {
const InstructionBlock* predecessor_block = data->GetBlock(predecessor);
vreg_data.EmitGapMoveToSpillSlot(
allocated_op, predecessor_block->last_instruction_index(), data);
}
}
}
void RegisterState::Register::SpillPendingUses(
MidTierRegisterAllocationData* data) {
VirtualRegisterData& vreg_data =
@@ -649,6 +699,11 @@ int RegisterState::VirtualRegisterForRegister(RegisterIndex reg) {
}
}
bool RegisterState::IsPhiGapMove(RegisterIndex reg) {
DCHECK(RegisterState::IsAllocated(reg));
return reg_data(reg).is_phi_gap_move();
}
void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
InstructionOperand* operand,
MidTierRegisterAllocationData* data) {
@@ -660,9 +715,10 @@ void RegisterState::Commit(RegisterIndex reg, AllocatedOperand allocated,
}
void RegisterState::Spill(RegisterIndex reg, AllocatedOperand allocated,
const InstructionBlock* current_block,
MidTierRegisterAllocationData* data) {
DCHECK(IsAllocated(reg));
-reg_data(reg).Spill(allocated, data);
+reg_data(reg).Spill(allocated, current_block, data);
ResetDataFor(reg);
}
@@ -680,6 +736,11 @@ void RegisterState::AllocatePendingUse(RegisterIndex reg, int virtual_register,
reg_data(reg).PendingUse(operand, virtual_register, instr_index);
}
void RegisterState::UseForPhiGapMove(RegisterIndex reg) {
DCHECK(IsAllocated(reg));
reg_data(reg).MarkAsPhiMove();
}
RegisterState::Register& RegisterState::reg_data(RegisterIndex reg) {
DCHECK(HasRegisterData(reg));
return *register_data_[reg.ToInt()];
@@ -733,6 +794,8 @@ class SinglePassRegisterAllocator final {
UnallocatedOperand* input, int instr_index);
void AllocateGapMoveInput(UnallocatedOperand* operand, int instr_index);
void AllocateTemp(UnallocatedOperand* operand, int instr_index);
void AllocatePhi(int virtual_register, const InstructionBlock* block);
void AllocatePhiGapMove(int to_vreg, int from_vreg, int instr_index);
// Reserve any fixed registers for the operands on an instruction before doing
// allocation on the operands.
@@ -867,6 +930,7 @@ class SinglePassRegisterAllocator final {
}
int num_allocatable_registers() const { return num_allocatable_registers_; }
const InstructionBlock* current_block() const { return current_block_; }
MidTierRegisterAllocationData* data() const { return data_; }
// Virtual register to register mapping.
@@ -875,6 +939,9 @@ class SinglePassRegisterAllocator final {
// Current register state during allocation.
RegisterState* register_state_;
// The current block being processed.
const InstructionBlock* current_block_;
const RegisterKind kind_;
const int num_allocatable_registers_;
ZoneVector<RegisterIndex> reg_code_to_index_;
@@ -895,6 +962,7 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
: virtual_register_to_reg_(data->code()->VirtualRegisterCount(),
data->allocation_zone()),
register_state_(nullptr),
current_block_(nullptr),
kind_(kind),
num_allocatable_registers_(
GetAllocatableRegisterCount(data->config(), kind)),
@@ -930,14 +998,19 @@ void SinglePassRegisterAllocator::EndInstruction() {
void SinglePassRegisterAllocator::StartBlock(const InstructionBlock* block) {
DCHECK(!HasRegisterState());
DCHECK_NULL(current_block_);
DCHECK_EQ(in_use_at_instr_start_bits_, 0);
DCHECK_EQ(in_use_at_instr_end_bits_, 0);
DCHECK_EQ(allocated_registers_bits_, 0);
// Update the current block we are processing.
current_block_ = block;
}
void SinglePassRegisterAllocator::EndBlock(const InstructionBlock* block) {
DCHECK_EQ(in_use_at_instr_start_bits_, 0);
DCHECK_EQ(in_use_at_instr_end_bits_, 0);
current_block_ = nullptr;
register_state_ = nullptr;
}
@@ -993,7 +1066,8 @@ void SinglePassRegisterAllocator::EmitGapMoveFromOutput(InstructionOperand from,
int instr_index) {
DCHECK(from.IsAllocated());
DCHECK(to.IsAllocated());
-const InstructionBlock* block = data()->GetBlock(instr_index);
+const InstructionBlock* block = current_block();
DCHECK_EQ(data()->GetBlock(instr_index), block);
if (instr_index == block->last_instruction_index()) {
// Add gap move to the first instruction of every successor block.
for (const RpoNumber succ : block->successors()) {
@@ -1145,7 +1219,7 @@ void SinglePassRegisterAllocator::SpillRegister(RegisterIndex reg) {
// Spill the register and free register.
int virtual_register = VirtualRegisterForRegister(reg);
AllocatedOperand allocated = AllocatedOperandForReg(reg, virtual_register);
-register_state()->Spill(reg, allocated, data());
+register_state()->Spill(reg, allocated, current_block(), data());
FreeRegister(reg, virtual_register);
}
@@ -1351,8 +1425,8 @@ RegisterIndex SinglePassRegisterAllocator::AllocateOutput(
}
if (vreg_data.NeedsSpillAtOutput()) {
vreg_data.EmitGapMoveFromOutputToSpillSlot(
-*AllocatedOperand::cast(operand), data()->GetBlock(instr_index),
-instr_index, data());
+*AllocatedOperand::cast(operand), current_block(), instr_index,
+data());
}
}
@@ -1471,6 +1545,61 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
MarkRegisterUse(reg, rep, pos);
}
void SinglePassRegisterAllocator::AllocatePhiGapMove(int to_vreg, int from_vreg,
int instr_index) {
EnsureRegisterState();
RegisterIndex from_register = RegisterForVirtualRegister(from_vreg);
RegisterIndex to_register = RegisterForVirtualRegister(to_vreg);
// If to_register isn't marked as a phi gap move, we can't use it as such.
if (to_register.is_valid() && !register_state()->IsPhiGapMove(to_register)) {
to_register = RegisterIndex::Invalid();
}
if (to_register.is_valid() && !from_register.is_valid()) {
// If the |to| virtual register is allocated to a register, and the |from|
// virtual register isn't allocated, then commit this register and
// re-allocate it to the |from| virtual register.
InstructionOperand operand;
CommitRegister(to_register, to_vreg, &operand, UsePosition::kAll);
AllocateUse(to_register, from_vreg, &operand, instr_index,
UsePosition::kAll);
} else {
// Otherwise add a gap move.
MoveOperands* move =
data()->AddPendingOperandGapMove(instr_index, Instruction::END);
PendingOperand* to_operand = PendingOperand::cast(&move->destination());
PendingOperand* from_operand = PendingOperand::cast(&move->source());
// Commit the |to| side to either a register or the pending spills.
if (to_register.is_valid()) {
CommitRegister(to_register, to_vreg, to_operand, UsePosition::kAll);
} else {
VirtualRegisterDataFor(to_vreg).SpillOperand(to_operand, instr_index,
data());
}
// The from side is unconstrained.
UnallocatedOperand unconstrained_input(UnallocatedOperand::REGISTER_OR_SLOT,
from_vreg);
InstructionOperand::ReplaceWith(from_operand, &unconstrained_input);
}
}
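
A hedged sketch of the case split above (stand-in types; register validity is
modeled with plain ints), showing when the phi's register can simply be handed
over to the |from| virtual register versus when a pending gap move is emitted:

// Standalone sketch of AllocatePhiGapMove's decision logic; not V8 code.
#include <cstdio>

constexpr int kInvalidReg = -1;

void AllocatePhiGapMoveSketch(int to_register, int from_register,
                              bool to_is_phi_gap_move) {
  // A register holding |to| is only reusable if it was marked as a phi
  // gap move; otherwise fall through to the generic gap-move path.
  if (to_register != kInvalidReg && !to_is_phi_gap_move) {
    to_register = kInvalidReg;
  }
  if (to_register != kInvalidReg && from_register == kInvalidReg) {
    // Commit the phi's register and re-allocate it to |from|, turning the
    // phi gap move into a register self-move.
    std::printf("reuse r%d: commit for to-vreg, reallocate to from-vreg\n",
                to_register);
  } else {
    // Otherwise emit a pending END gap move: |to| resolves to its register
    // or its spill slot, and |from| stays unconstrained.
    std::printf("pending gap move; to-side is a %s\n",
                to_register != kInvalidReg ? "register" : "spill slot");
  }
}

int main() {
  AllocatePhiGapMoveSketch(3, kInvalidReg, true);             // reuse path
  AllocatePhiGapMoveSketch(kInvalidReg, kInvalidReg, false);  // move path
  return 0;
}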
void SinglePassRegisterAllocator::AllocatePhi(int virtual_register,
const InstructionBlock* block) {
VirtualRegisterData& vreg_data = VirtualRegisterDataFor(virtual_register);
if (vreg_data.NeedsSpillAtOutput() || block->IsLoopHeader()) {
// If the Phi needs to be spilled, just spill here directly so that all
// gap moves into the Phi move into the spill slot.
SpillRegisterForVirtualRegister(virtual_register);
} else {
RegisterIndex reg = RegisterForVirtualRegister(virtual_register);
DCHECK(reg.is_valid());
register_state()->UseForPhiGapMove(reg);
}
}
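
A short sketch of the policy above, again with stand-in parameters: spill
eagerly when the phi has to end up in its slot anyway, otherwise keep the
register but mark it so a later spill propagates to the predecessors. The
loop-header case reflects that back-edge predecessors are allocated before
the header in reverse block order, so their gap moves must target the slot.

// Standalone sketch of AllocatePhi's policy; not V8 code.
#include <cstdio>

void AllocatePhiSketch(bool needs_spill_at_output, bool is_loop_header,
                       bool phi_has_register) {
  if (needs_spill_at_output || is_loop_header) {
    // Spill now so every gap move into the phi targets the spill slot.
    std::printf("spill phi's virtual register directly\n");
  } else if (phi_has_register) {
    // Keep the register but mark it, so a later spill of this register can
    // be propagated to the already-emitted predecessor gap moves.
    std::printf("mark register as phi gap move (UseForPhiGapMove)\n");
  }
}

int main() {
  AllocatePhiSketch(false, true, true);   // loop header => spill
  AllocatePhiSketch(false, false, true);  // plain merge => mark register
  return 0;
}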
void SinglePassRegisterAllocator::EnsureRegisterState() {
if (!HasRegisterState()) {
register_state_ = RegisterState::New(kind(), num_allocatable_registers_,
@@ -1500,6 +1629,14 @@ void MidTierRegisterAllocator::DefineOutputs() {
void MidTierRegisterAllocator::InitializeBlockState(
const InstructionBlock* block) {
// Update our predecessor blocks with their successors_phi_index if we have
// phis.
if (block->phis().size()) {
for (int i = 0; i < static_cast<int>(block->PredecessorCount()); ++i) {
data()->block_state(block->predecessors()[i]).set_successors_phi_index(i);
}
}
// Mark this block as dominating itself.
BlockState& block_state = data()->block_state(block->rpo_number());
block_state.dominated_blocks()->Add(block->rpo_number().ToInt());
@@ -1652,6 +1789,12 @@ void MidTierRegisterAllocator::AllocateRegisters(
AllocatorFor(input).AllocateInput(input, instr_index);
}
// If we are allocating for the last instruction in the block, allocate any
// phi gap move operations that are needed to resolve phis in our successor.
if (instr_index == block->last_instruction_index()) {
AllocatePhiGapMoves(block);
}
// Allocate any unallocated gap move inputs.
ParallelMove* moves = instr->GetParallelMove(Instruction::END);
if (moves != nullptr) {
@@ -1669,6 +1812,8 @@ void MidTierRegisterAllocator::AllocateRegisters(
double_reg_allocator().EndInstruction();
}
AllocatePhis(block);
// TODO(rmcilroy): Add support for cross-block allocations.
general_reg_allocator().SpillAllRegisters();
double_reg_allocator().SpillAllRegisters();
@@ -1735,6 +1880,41 @@ void MidTierRegisterAllocator::ReserveFixedRegisters(int instr_index) {
}
}
void MidTierRegisterAllocator::AllocatePhiGapMoves(
const InstructionBlock* block) {
int successors_phi_index =
data()->block_state(block->rpo_number()).successors_phi_index();
// If successors_phi_index is -1 there are no phis in the successor.
if (successors_phi_index == -1) return;
// The last instruction of a block with phis can't require reference maps,
// since we won't record phi gap moves that get spilled when populating the
// reference maps.
int instr_index = block->last_instruction_index();
DCHECK(!code()->InstructionAt(instr_index)->HasReferenceMap());
// If there are phis, we only have a single successor due to edge-split form.
DCHECK_EQ(block->SuccessorCount(), 1);
const InstructionBlock* successor = data()->GetBlock(block->successors()[0]);
for (PhiInstruction* phi : successor->phis()) {
int to_vreg = phi->virtual_register();
int from_vreg = phi->operands()[successors_phi_index];
MachineRepresentation rep = RepresentationFor(to_vreg);
AllocatorFor(rep).AllocatePhiGapMove(to_vreg, from_vreg, instr_index);
}
}
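
To make the indexing concrete: a tiny standalone sketch (PhiInstruction here
is a simplified stand-in) of how the successors_phi_index recorded in
InitializeBlockState selects each predecessor's column in the successor's
phi operand list.

// Standalone sketch; not V8 code.
#include <cstdio>
#include <vector>

struct PhiInstruction {
  int virtual_register;       // The phi's own vreg (the "to" side).
  std::vector<int> operands;  // One incoming vreg per predecessor.
};

int main() {
  // Merge block with two predecessors: predecessor 0 supplies v7 and
  // predecessor 1 supplies v9 (indices assigned in InitializeBlockState).
  PhiInstruction phi{5, {7, 9}};
  for (int successors_phi_index = 0; successors_phi_index < 2;
       ++successors_phi_index) {
    int from_vreg = phi.operands[successors_phi_index];
    std::printf("predecessor %d: gap move v%d -> v%d\n",
                successors_phi_index, from_vreg, phi.virtual_register);
  }
  return 0;
}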
void MidTierRegisterAllocator::AllocatePhis(const InstructionBlock* block) {
for (PhiInstruction* phi : block->phis()) {
int virtual_register = phi->virtual_register();
MachineRepresentation rep = RepresentationFor(virtual_register);
AllocatorFor(rep).AllocatePhi(virtual_register, block);
}
}
void MidTierRegisterAllocator::UpdateSpillRangesForLoops() {
// Extend the spill range of any spill that crosses a loop header to
// the full loop.
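
A guess at the shape of that extension, as a standalone sketch (Range and the
crossing test are assumptions, not the commit's code): a spill range that
starts before a loop header but dies inside the loop is widened to the loop's
full extent, so the value stays spilled across back edges.

// Standalone sketch; not V8 code.
#include <algorithm>
#include <cstdio>

struct Range {
  int start, end;  // Half-open instruction-index interval [start, end).
};

Range ExtendAcrossLoop(Range spill, Range loop) {
  // If the spill range crosses the loop header, cover the whole loop.
  if (spill.start < loop.start && spill.end > loop.start) {
    spill.end = std::max(spill.end, loop.end);
  }
  return spill;
}

int main() {
  Range spill{5, 12}, loop{10, 30};
  Range extended = ExtendAcrossLoop(spill, loop);
  std::printf("[%d, %d)\n", extended.start, extended.end);  // Prints [5, 30)
  return 0;
}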

@@ -121,6 +121,8 @@ class MidTierRegisterAllocator final {
// Allocate registers operations.
void AllocateRegisters(const InstructionBlock* block);
void AllocatePhis(const InstructionBlock* block);
void AllocatePhiGapMoves(const InstructionBlock* block);
void UpdateSpillRangesForLoops();
bool IsFixedRegisterPolicy(const UnallocatedOperand* operand);

@@ -45,6 +45,230 @@ TEST_F(MidTierRegisterAllocatorTest, CanAllocateFPRegisters) {
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SimpleLoop) {
// i = K;
// while(true) { i++ }
StartBlock();
auto i_reg = DefineConstant();
// Add a branch around the loop to ensure the end-block
// is connected.
EndBlock(Branch(Reg(DefineConstant()), 3, 1));
StartBlock();
EndBlock();
{
StartLoop(1);
StartBlock();
auto phi = Phi(i_reg, 2);
auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
SetInput(phi, 1, ipp);
EndBlock(Jump(0));
EndLoop();
}
StartBlock();
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SimpleBranch) {
// return i ? K1 : K2
StartBlock();
auto i = DefineConstant();
EndBlock(Branch(Reg(i), 1, 2));
StartBlock();
Return(DefineConstant());
EndBlock(Last());
StartBlock();
Return(DefineConstant());
EndBlock(Last());
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SimpleDiamond) {
// return p0 ? p0 : p0
StartBlock();
auto param = Parameter();
EndBlock(Branch(Reg(param), 1, 2));
StartBlock();
EndBlock(Jump(2));
StartBlock();
EndBlock(Jump(1));
StartBlock();
Return(param);
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SimpleDiamondPhi) {
// return i ? K1 : K2
StartBlock();
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
StartBlock();
auto t_val = DefineConstant();
EndBlock(Jump(2));
StartBlock();
auto f_val = DefineConstant();
EndBlock(Jump(1));
StartBlock();
Return(Reg(Phi(t_val, f_val)));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, DiamondManyPhis) {
constexpr int kPhis = Register::kNumRegisters * 2;
StartBlock();
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
StartBlock();
VReg t_vals[kPhis];
for (int i = 0; i < kPhis; ++i) {
t_vals[i] = DefineConstant();
}
EndBlock(Jump(2));
StartBlock();
VReg f_vals[kPhis];
for (int i = 0; i < kPhis; ++i) {
f_vals[i] = DefineConstant();
}
EndBlock(Jump(1));
StartBlock();
TestOperand merged[kPhis];
for (int i = 0; i < kPhis; ++i) {
merged[i] = Use(Phi(t_vals[i], f_vals[i]));
}
Return(EmitCall(Slot(-1), kPhis, merged));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
constexpr int kPhis = Register::kNumRegisters * 2;
// First diamond.
StartBlock();
VReg vals[kPhis];
for (int i = 0; i < kPhis; ++i) {
vals[i] = Parameter(Slot(-1 - i));
}
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
StartBlock();
EndBlock(Jump(2));
StartBlock();
EndBlock(Jump(1));
// Second diamond.
StartBlock();
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
StartBlock();
EndBlock(Jump(2));
StartBlock();
EndBlock(Jump(1));
StartBlock();
TestOperand merged[kPhis];
for (int i = 0; i < kPhis; ++i) {
merged[i] = Use(Phi(vals[i], vals[i]));
}
Return(EmitCall(Reg(0), kPhis, merged));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
const size_t kNumRegs = 3;
const size_t kParams = kNumRegs + 1;
// Override number of registers.
SetNumRegs(kNumRegs, kNumRegs);
StartBlock();
auto constant = DefineConstant();
VReg parameters[kParams];
for (size_t i = 0; i < arraysize(parameters); ++i) {
parameters[i] = DefineConstant();
}
EndBlock();
PhiInstruction* phis[kParams];
{
StartLoop(2);
// Loop header.
StartBlock();
for (size_t i = 0; i < arraysize(parameters); ++i) {
phis[i] = Phi(parameters[i], 2);
}
// Perform some computations.
// something like phi[i] += const
for (size_t i = 0; i < arraysize(parameters); ++i) {
auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
SetInput(phis[i], 1, result);
}
EndBlock(Branch(Reg(DefineConstant()), 1, 2));
// Jump back to loop header.
StartBlock();
EndBlock(Jump(-1));
EndLoop();
}
StartBlock();
Return(DefineConstant());
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SpillPhi) {
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
auto left = Define(Reg(0));
EndBlock(Jump(2));
StartBlock();
auto right = Define(Reg(0));
EndBlock();
StartBlock();
auto phi = Phi(left, right);
EmitCall(Slot(-1));
Return(Reg(phi));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, MoveLotsOfConstants) {
FLAG_trace_turbo = true;
StartBlock();
@@ -114,6 +338,98 @@ TEST_F(MidTierRegisterAllocatorTest, SplitBeforeInstruction2) {
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, NestedDiamondPhiMerge) {
// Outer diamond.
StartBlock();
EndBlock(Branch(Imm(), 1, 5));
// Diamond 1
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
auto ll = Define(Reg());
EndBlock(Jump(2));
StartBlock();
auto lr = Define(Reg());
EndBlock();
StartBlock();
auto l_phi = Phi(ll, lr);
EndBlock(Jump(5));
// Diamond 2
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
auto rl = Define(Reg());
EndBlock(Jump(2));
StartBlock();
auto rr = Define(Reg());
EndBlock();
StartBlock();
auto r_phi = Phi(rl, rr);
EndBlock();
// Outer diamond merge.
StartBlock();
auto phi = Phi(l_phi, r_phi);
Return(Reg(phi));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
// Outer diamond.
StartBlock();
EndBlock(Branch(Imm(), 1, 5));
// Diamond 1
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
auto ll = Define(Reg(0));
EndBlock(Jump(2));
StartBlock();
auto lr = Define(Reg(1));
EndBlock();
StartBlock();
auto l_phi = Phi(ll, lr);
EndBlock(Jump(5));
// Diamond 2
StartBlock();
EndBlock(Branch(Imm(), 1, 2));
StartBlock();
auto rl = Define(Reg(2));
EndBlock(Jump(2));
StartBlock();
auto rr = Define(Reg(3));
EndBlock();
StartBlock();
auto r_phi = Phi(rl, rr);
EndBlock();
// Outer diamond merge.
StartBlock();
auto phi = Phi(l_phi, r_phi);
Return(Reg(phi));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, SplitBeforeAndMove) {
StartBlock();
@@ -149,6 +465,80 @@ TEST_F(MidTierRegisterAllocatorTest, SpillTwice) {
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, RegressionLoadConstantBeforeSpill) {
StartBlock();
// Fill registers.
VReg values[Register::kNumRegisters];
for (size_t i = arraysize(values); i > 0; --i) {
values[i - 1] = Define(Reg(static_cast<int>(i - 1)));
}
auto c = DefineConstant();
auto to_spill = Define(Reg());
EndBlock(Jump(1));
{
StartLoop(1);
StartBlock();
// Create a use for c in the second half of the previous block's last gap.
Phi(c);
for (size_t i = arraysize(values); i > 0; --i) {
Phi(values[i - 1]);
}
EndBlock(Jump(1));
EndLoop();
}
StartBlock();
// Force c to split within to_spill's definition.
EmitI(Reg(c));
EmitI(Reg(to_spill));
EndBlock(Last());
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, DiamondWithCallFirstBlock) {
StartBlock();
auto x = EmitOI(Reg(0));
EndBlock(Branch(Reg(x), 1, 2));
StartBlock();
EmitCall(Slot(-1));
auto occupy = EmitOI(Reg(0));
EndBlock(Jump(2));
StartBlock();
EndBlock(FallThrough());
StartBlock();
Use(occupy);
Return(Reg(x));
EndBlock();
Allocate();
}
TEST_F(MidTierRegisterAllocatorTest, DiamondWithCallSecondBlock) {
StartBlock();
auto x = EmitOI(Reg(0));
EndBlock(Branch(Reg(x), 1, 2));
StartBlock();
EndBlock(Jump(2));
StartBlock();
EmitCall(Slot(-1));
auto occupy = EmitOI(Reg(0));
EndBlock(FallThrough());
StartBlock();
Use(occupy);
Return(Reg(x));
EndBlock();
Allocate();
}
namespace {
enum class ParameterType { kFixedSlot, kSlot, kRegister, kFixedRegister };