[turbofan] smash GapInstruction into Instruction
R=titzer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1041163002

Cr-Commit-Position: refs/heads/master@{#27538}
Parent: e9e8ac7afc
Commit: e39750a26e
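Roughly, the effect of this CL: gap moves no longer live in a separate GapInstruction interleaved between real instructions; every Instruction now carries two ParallelMove slots addressed by Instruction::GapPosition (START and END). A minimal, hedged sketch of the post-change idiom (surrounding variable names are illustrative; the calls are the ones introduced in this CL, cf. RegisterAllocator::AddGapMove below):

// Sketch only: attach a move to the gap preceding the instruction at `index`.
Instruction* instr = code->InstructionAt(index);
ParallelMove* move =
    instr->GetOrCreateParallelMove(Instruction::START, code_zone());
move->AddMove(from, to, code_zone());  // `from`/`to` are InstructionOperand*.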
@@ -186,10 +186,8 @@ void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,

 void CodeGenerator::AssembleInstruction(Instruction* instr) {
-  if (instr->IsGapMoves()) {
-    // Handle parallel moves associated with the gap instruction.
-    AssembleGap(GapInstruction::cast(instr));
-  } else if (instr->IsSourcePosition()) {
+  AssembleGaps(instr);
+  if (instr->IsSourcePosition()) {
     AssembleSourcePosition(SourcePositionInstruction::cast(instr));
   } else {
     // Assemble architecture-specific code for the instruction.
@@ -258,13 +256,13 @@ void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
 }


-void CodeGenerator::AssembleGap(GapInstruction* instr) {
-  for (int i = GapInstruction::FIRST_INNER_POSITION;
-       i <= GapInstruction::LAST_INNER_POSITION; i++) {
-    GapInstruction::InnerPosition inner_pos =
-        static_cast<GapInstruction::InnerPosition>(i);
+void CodeGenerator::AssembleGaps(Instruction* instr) {
+  for (int i = Instruction::FIRST_GAP_POSITION;
+       i <= Instruction::LAST_GAP_POSITION; i++) {
+    Instruction::GapPosition inner_pos =
+        static_cast<Instruction::GapPosition>(i);
     ParallelMove* move = instr->GetParallelMove(inner_pos);
-    if (move != NULL) resolver()->Resolve(move);
+    if (move != nullptr) resolver()->Resolve(move);
   }
 }
@@ -61,7 +61,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
   // Assemble code for the specified instruction.
   void AssembleInstruction(Instruction* instr);
   void AssembleSourcePosition(SourcePositionInstruction* instr);
-  void AssembleGap(GapInstruction* gap);
+  void AssembleGaps(Instruction* instr);

   // ===========================================================================
   // ============= Architecture-specific code generation methods. ==============
@@ -593,10 +593,12 @@ void GraphC1Visualizer::PrintSchedule(const char* phase,
     if (instruction_block->code_start() >= 0) {
       int first_index = instruction_block->first_instruction_index();
       int last_index = instruction_block->last_instruction_index();
-      PrintIntProperty("first_lir_id", LifetimePosition::FromInstructionIndex(
-                                           first_index).Value());
-      PrintIntProperty("last_lir_id", LifetimePosition::FromInstructionIndex(
-                                          last_index).Value());
+      PrintIntProperty(
+          "first_lir_id",
+          LifetimePosition::GapFromInstructionIndex(first_index).Value());
+      PrintIntProperty("last_lir_id",
+                       LifetimePosition::InstructionFromInstructionIndex(
+                           last_index).Value());
     }

     {
@@ -110,7 +110,10 @@ Instruction::Instruction(InstructionCode opcode)
     : opcode_(opcode),
       bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
                  TempCountField::encode(0) | IsCallField::encode(false)),
-      pointer_map_(NULL) {}
+      pointer_map_(NULL) {
+  parallel_moves_[0] = nullptr;
+  parallel_moves_[1] = nullptr;
+}


 Instruction::Instruction(InstructionCode opcode, size_t output_count,
@@ -123,6 +126,8 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
                  TempCountField::encode(temp_count) |
                  IsCallField::encode(false)),
       pointer_map_(NULL) {
+  parallel_moves_[0] = nullptr;
+  parallel_moves_[1] = nullptr;
   size_t offset = 0;
   for (size_t i = 0; i < output_count; ++i) {
     DCHECK(!outputs[i].IsInvalid());
@@ -139,11 +144,12 @@ Instruction::Instruction(InstructionCode opcode, size_t output_count,
 }


-bool GapInstruction::IsRedundant() const {
-  for (int i = GapInstruction::FIRST_INNER_POSITION;
-       i <= GapInstruction::LAST_INNER_POSITION; i++) {
-    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant())
+bool Instruction::AreMovesRedundant() const {
+  for (int i = Instruction::FIRST_GAP_POSITION;
+       i <= Instruction::LAST_GAP_POSITION; i++) {
+    if (parallel_moves_[i] != nullptr && !parallel_moves_[i]->IsRedundant()) {
       return false;
+    }
   }
   return true;
 }
@@ -289,6 +295,19 @@ std::ostream& operator<<(std::ostream& os,
   const Instruction& instr = *printable.instr_;
   PrintableInstructionOperand printable_op = {printable.register_configuration_,
                                               NULL};
+  os << "gap ";
+  for (int i = Instruction::FIRST_GAP_POSITION;
+       i <= Instruction::LAST_GAP_POSITION; i++) {
+    os << "(";
+    if (instr.parallel_moves()[i] != NULL) {
+      PrintableParallelMove ppm = {printable.register_configuration_,
+                                   instr.parallel_moves()[i]};
+      os << ppm;
+    }
+    os << ") ";
+  }
+  os << "\n ";
+
   if (instr.OutputCount() > 1) os << "(";
   for (size_t i = 0; i < instr.OutputCount(); i++) {
     if (i > 0) os << ", ";
@@ -299,20 +318,7 @@ std::ostream& operator<<(std::ostream& os,
   if (instr.OutputCount() > 1) os << ") = ";
   if (instr.OutputCount() == 1) os << " = ";

-  if (instr.IsGapMoves()) {
-    const GapInstruction* gap = GapInstruction::cast(&instr);
-    os << "gap ";
-    for (int i = GapInstruction::FIRST_INNER_POSITION;
-         i <= GapInstruction::LAST_INNER_POSITION; i++) {
-      os << "(";
-      if (gap->parallel_moves_[i] != NULL) {
-        PrintableParallelMove ppm = {printable.register_configuration_,
-                                     gap->parallel_moves_[i]};
-        os << ppm;
-      }
-      os << ") ";
-    }
-  } else if (instr.IsSourcePosition()) {
+  if (instr.IsSourcePosition()) {
     const SourcePositionInstruction* pos =
         SourcePositionInstruction::cast(&instr);
     os << "position (" << pos->source_position().raw() << ")";
@@ -494,9 +500,9 @@ int InstructionSequence::NextVirtualRegister() {
 }


-GapInstruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
+Instruction* InstructionSequence::GetBlockStart(RpoNumber rpo) const {
   const InstructionBlock* block = InstructionBlockAt(rpo);
-  return GapInstruction::cast(InstructionAt(block->code_start()));
+  return InstructionAt(block->code_start());
 }
@@ -522,8 +528,6 @@ void InstructionSequence::EndBlock(RpoNumber rpo) {


 int InstructionSequence::AddInstruction(Instruction* instr) {
-  GapInstruction* gap = GapInstruction::New(zone());
-  instructions_.push_back(gap);
   int index = static_cast<int>(instructions_.size());
   instructions_.push_back(instr);
   if (instr->NeedsPointerMap()) {
@@ -571,13 +575,6 @@ void InstructionSequence::MarkAsDouble(int virtual_register) {
 }


-void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
-                                     InstructionOperand* to) {
-  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
-      from, to, zone());
-}
-
-
 InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
     FrameStateDescriptor* descriptor) {
   int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
@@ -25,8 +25,7 @@ namespace compiler {
 class Schedule;

 // A couple of reserved opcodes are used for internal use.
-const InstructionCode kGapInstruction = -1;
-const InstructionCode kSourcePositionInstruction = -2;
+const InstructionCode kSourcePositionInstruction = -1;

 #define INSTRUCTION_OPERAND_LIST(V) \
   V(Constant, CONSTANT)             \
@@ -543,7 +542,6 @@ class Instruction {
   bool NeedsPointerMap() const { return IsCall(); }
   bool HasPointerMap() const { return pointer_map_ != NULL; }

-  bool IsGapMoves() const { return opcode() == kGapInstruction; }
   bool IsSourcePosition() const {
     return opcode() == kSourcePositionInstruction;
   }
@@ -570,8 +568,37 @@ class Instruction {
            OutputCount() == 0 && TempCount() == 0;
   }

+  enum GapPosition {
+    START,
+    END,
+    FIRST_GAP_POSITION = START,
+    LAST_GAP_POSITION = END
+  };
+
+  ParallelMove* GetOrCreateParallelMove(GapPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == nullptr) {
+      parallel_moves_[pos] = new (zone) ParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  ParallelMove* GetParallelMove(GapPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+  const ParallelMove* GetParallelMove(GapPosition pos) const {
+    return parallel_moves_[pos];
+  }
+
+  bool AreMovesRedundant() const;
+
+  ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
+  ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
+
  protected:
   explicit Instruction(InstructionCode opcode);

  private:
   Instruction(InstructionCode opcode, size_t output_count,
               InstructionOperand* outputs, size_t input_count,
               InstructionOperand* inputs, size_t temp_count,
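A short usage sketch of the accessors added above (hypothetical caller; it mirrors how AssembleGaps and the verifier elsewhere in this diff walk the two positions):

// Sketch: visit both gap-move slots of an instruction, skipping empty ones.
for (int i = Instruction::FIRST_GAP_POSITION;
     i <= Instruction::LAST_GAP_POSITION; i++) {
  auto pos = static_cast<Instruction::GapPosition>(i);
  if (const ParallelMove* move = instr->GetParallelMove(pos)) {
    // ... resolve, print, or verify `move` ...
  }
}
if (instr->AreMovesRedundant()) {
  // Both slots are null or contain only redundant moves.
}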
@@ -584,6 +611,7 @@ class Instruction {

   InstructionCode opcode_;
   uint32_t bit_field_;
+  ParallelMove* parallel_moves_[2];
   PointerMap* pointer_map_;
   InstructionOperand operands_[1];
@@ -599,65 +627,6 @@ struct PrintableInstruction {
 std::ostream& operator<<(std::ostream& os, const PrintableInstruction& instr);


-// Represents moves inserted before an instruction due to register allocation.
-// TODO(titzer): squash GapInstruction back into Instruction, since essentially
-// every instruction can possibly have moves inserted before it.
-class GapInstruction : public Instruction {
- public:
-  enum InnerPosition {
-    START,
-    END,
-    FIRST_INNER_POSITION = START,
-    LAST_INNER_POSITION = END
-  };
-
-  ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
-    if (parallel_moves_[pos] == NULL) {
-      parallel_moves_[pos] = new (zone) ParallelMove(zone);
-    }
-    return parallel_moves_[pos];
-  }
-
-  ParallelMove* GetParallelMove(InnerPosition pos) {
-    return parallel_moves_[pos];
-  }
-
-  const ParallelMove* GetParallelMove(InnerPosition pos) const {
-    return parallel_moves_[pos];
-  }
-
-  bool IsRedundant() const;
-
-  ParallelMove** parallel_moves() { return parallel_moves_; }
-
-  static GapInstruction* New(Zone* zone) {
-    void* buffer = zone->New(sizeof(GapInstruction));
-    return new (buffer) GapInstruction(kGapInstruction);
-  }
-
-  static GapInstruction* cast(Instruction* instr) {
-    DCHECK(instr->IsGapMoves());
-    return static_cast<GapInstruction*>(instr);
-  }
-
-  static const GapInstruction* cast(const Instruction* instr) {
-    DCHECK(instr->IsGapMoves());
-    return static_cast<const GapInstruction*>(instr);
-  }
-
- protected:
-  explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
-    parallel_moves_[START] = NULL;
-    parallel_moves_[END] = NULL;
-  }
-
- private:
-  friend std::ostream& operator<<(std::ostream& os,
-                                  const PrintableInstruction& instr);
-  ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
-};
-
-
 class SourcePositionInstruction FINAL : public Instruction {
  public:
   static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
@@ -982,19 +951,13 @@ class InstructionSequence FINAL : public ZoneObject {
   void MarkAsReference(int virtual_register);
   void MarkAsDouble(int virtual_register);

-  void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
-
-  GapInstruction* GetBlockStart(RpoNumber rpo) const;
+  Instruction* GetBlockStart(RpoNumber rpo) const;

   typedef InstructionDeque::const_iterator const_iterator;
   const_iterator begin() const { return instructions_.begin(); }
   const_iterator end() const { return instructions_.end(); }
   const InstructionDeque& instructions() const { return instructions_; }

-  GapInstruction* GapAt(int index) const {
-    return GapInstruction::cast(InstructionAt(index));
-  }
-  bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
   Instruction* InstructionAt(int index) const {
     DCHECK(index >= 0);
     DCHECK(index < static_cast<int>(instructions_.size()));
@@ -76,10 +76,10 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
       RpoNumber fw = block->rpo_number();
       for (int i = block->code_start(); i < block->code_end(); ++i) {
         Instruction* instr = code->InstructionAt(i);
-        if (instr->IsGapMoves() && GapInstruction::cast(instr)->IsRedundant()) {
-          // skip redundant gap moves.
-          TRACE(" nop gap\n");
-          continue;
+        if (!instr->AreMovesRedundant()) {
+          // can't skip instructions with non redundant moves.
+          TRACE(" parallel move\n");
+          fallthru = false;
         } else if (instr->IsSourcePosition()) {
           // skip source positions.
           TRACE(" src pos\n");
@@ -16,15 +16,14 @@ typedef ZoneSet<InstructionOperand> OperandSet;


 bool GapsCanMoveOver(Instruction* instr) {
-  DCHECK(!instr->IsGapMoves());
   return instr->IsSourcePosition() || instr->IsNop();
 }


-int FindFirstNonEmptySlot(GapInstruction* gap) {
-  int i = GapInstruction::FIRST_INNER_POSITION;
-  for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
-    auto move = gap->parallel_moves()[i];
+int FindFirstNonEmptySlot(Instruction* instr) {
+  int i = Instruction::FIRST_GAP_POSITION;
+  for (; i <= Instruction::LAST_GAP_POSITION; i++) {
+    auto move = instr->parallel_moves()[i];
     if (move == nullptr) continue;
     auto move_ops = move->move_operands();
     auto op = move_ops->begin();
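For orientation, the compression logic further down uses FindFirstNonEmptySlot to normalize an instruction's two slots so that slot 0 ends up holding all surviving moves. A rough sketch of that calling pattern, paraphrased from the CompressBlock hunk that follows (temp_vector is the optimizer's scratch vector):

// Sketch: fold all of an instruction's gap moves into slot 0.
int i = FindFirstNonEmptySlot(instr);
if (i <= Instruction::LAST_GAP_POSITION) {
  std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]);
  auto left = instr->parallel_moves()[0];
  for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) {
    if (auto move = instr->parallel_moves()[i]) {
      CompressMoves(&temp_vector, left, move);
    }
  }
}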
@ -97,52 +96,45 @@ void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
|
||||
void MoveOptimizer::CompressBlock(InstructionBlock* block) {
|
||||
auto temp_vector = temp_vector_0();
|
||||
DCHECK(temp_vector.empty());
|
||||
GapInstruction* prev_gap = nullptr;
|
||||
Instruction* prev_instr = nullptr;
|
||||
for (int index = block->code_start(); index < block->code_end(); ++index) {
|
||||
auto instr = code()->instructions()[index];
|
||||
if (!instr->IsGapMoves()) {
|
||||
if (GapsCanMoveOver(instr)) continue;
|
||||
if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
|
||||
prev_gap = nullptr;
|
||||
continue;
|
||||
}
|
||||
auto gap = GapInstruction::cast(instr);
|
||||
int i = FindFirstNonEmptySlot(gap);
|
||||
// Nothing to do here.
|
||||
if (i == GapInstruction::LAST_INNER_POSITION + 1) {
|
||||
if (prev_gap != nullptr) {
|
||||
// Slide prev_gap down so we always know where to look for it.
|
||||
std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
|
||||
prev_gap = gap;
|
||||
int i = FindFirstNonEmptySlot(instr);
|
||||
if (i <= Instruction::LAST_GAP_POSITION) {
|
||||
// Move the first non-empty gap to position 0.
|
||||
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[i]);
|
||||
auto left = instr->parallel_moves()[0];
|
||||
// Compress everything into position 0.
|
||||
for (++i; i <= Instruction::LAST_GAP_POSITION; ++i) {
|
||||
auto move = instr->parallel_moves()[i];
|
||||
if (move == nullptr) continue;
|
||||
CompressMoves(&temp_vector, left, move);
|
||||
}
|
||||
if (prev_instr != nullptr) {
|
||||
// Smash left into prev_instr, killing left.
|
||||
auto pred_moves = prev_instr->parallel_moves()[0];
|
||||
CompressMoves(&temp_vector, pred_moves, left);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
// Move the first non-empty gap to position 0.
|
||||
std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
|
||||
auto left = gap->parallel_moves()[0];
|
||||
// Compress everything into position 0.
|
||||
for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
|
||||
auto move = gap->parallel_moves()[i];
|
||||
if (move == nullptr) continue;
|
||||
CompressMoves(&temp_vector, left, move);
|
||||
if (prev_instr != nullptr) {
|
||||
// Slide prev_instr down so we always know where to look for it.
|
||||
std::swap(prev_instr->parallel_moves()[0], instr->parallel_moves()[0]);
|
||||
}
|
||||
if (prev_gap != nullptr) {
|
||||
// Smash left into prev_gap, killing left.
|
||||
auto pred_moves = prev_gap->parallel_moves()[0];
|
||||
CompressMoves(&temp_vector, pred_moves, left);
|
||||
// Slide prev_gap down so we always know where to look for it.
|
||||
std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
|
||||
prev_instr = instr->parallel_moves()[0] == nullptr ? nullptr : instr;
|
||||
if (GapsCanMoveOver(instr)) continue;
|
||||
if (prev_instr != nullptr) {
|
||||
to_finalize_.push_back(prev_instr);
|
||||
prev_instr = nullptr;
|
||||
}
|
||||
prev_gap = gap;
|
||||
}
|
||||
if (prev_gap != nullptr) to_finalize_.push_back(prev_gap);
|
||||
if (prev_instr != nullptr) {
|
||||
to_finalize_.push_back(prev_instr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
GapInstruction* MoveOptimizer::LastGap(InstructionBlock* block) {
|
||||
int gap_index = block->last_instruction_index() - 1;
|
||||
auto instr = code()->instructions()[gap_index];
|
||||
return GapInstruction::cast(instr);
|
||||
Instruction* MoveOptimizer::LastInstruction(InstructionBlock* block) {
|
||||
return code()->instructions()[block->last_instruction_index()];
|
||||
}
|
||||
|
||||
|
||||
@@ -153,7 +145,6 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
   for (auto pred_index : block->predecessors()) {
     auto pred = code()->InstructionBlockAt(pred_index);
     auto last_instr = code()->instructions()[pred->last_instruction_index()];
-    DCHECK(!last_instr->IsGapMoves());
     if (last_instr->IsSourcePosition()) continue;
     if (last_instr->IsCall()) return;
     if (last_instr->TempCount() != 0) return;
@@ -169,12 +160,12 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
   // Accumulate set of shared moves.
   for (auto pred_index : block->predecessors()) {
     auto pred = code()->InstructionBlockAt(pred_index);
-    auto gap = LastGap(pred);
-    if (gap->parallel_moves()[0] == nullptr ||
-        gap->parallel_moves()[0]->move_operands()->is_empty()) {
+    auto instr = LastInstruction(pred);
+    if (instr->parallel_moves()[0] == nullptr ||
+        instr->parallel_moves()[0]->move_operands()->is_empty()) {
       return;
     }
-    auto move_ops = gap->parallel_moves()[0]->move_operands();
+    auto move_ops = instr->parallel_moves()[0]->move_operands();
     for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
       if (op->IsRedundant()) continue;
       auto src = *op->source();
@ -191,34 +182,30 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
|
||||
}
|
||||
if (move_map.empty() || correct_counts != move_map.size()) return;
|
||||
// Find insertion point.
|
||||
GapInstruction* gap = nullptr;
|
||||
Instruction* instr = nullptr;
|
||||
for (int i = block->first_instruction_index();
|
||||
i <= block->last_instruction_index(); ++i) {
|
||||
auto instr = code()->instructions()[i];
|
||||
if (instr->IsGapMoves()) {
|
||||
gap = GapInstruction::cast(instr);
|
||||
continue;
|
||||
}
|
||||
if (!GapsCanMoveOver(instr)) break;
|
||||
instr = code()->instructions()[i];
|
||||
if (!GapsCanMoveOver(instr) || !instr->AreMovesRedundant()) break;
|
||||
}
|
||||
DCHECK(gap != nullptr);
|
||||
DCHECK(instr != nullptr);
|
||||
bool gap_initialized = true;
|
||||
if (gap->parallel_moves()[0] == nullptr ||
|
||||
gap->parallel_moves()[0]->move_operands()->is_empty()) {
|
||||
to_finalize_.push_back(gap);
|
||||
if (instr->parallel_moves()[0] == nullptr ||
|
||||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
|
||||
to_finalize_.push_back(instr);
|
||||
} else {
|
||||
// Will compress after insertion.
|
||||
gap_initialized = false;
|
||||
std::swap(gap->parallel_moves()[0], gap->parallel_moves()[1]);
|
||||
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
|
||||
}
|
||||
auto move = gap->GetOrCreateParallelMove(
|
||||
static_cast<GapInstruction::InnerPosition>(0), code_zone());
|
||||
auto move = instr->GetOrCreateParallelMove(
|
||||
static_cast<Instruction::GapPosition>(0), code_zone());
|
||||
// Delete relevant entries in predecessors and move everything to block.
|
||||
bool first_iteration = true;
|
||||
for (auto pred_index : block->predecessors()) {
|
||||
auto pred = code()->InstructionBlockAt(pred_index);
|
||||
auto gap = LastGap(pred);
|
||||
auto move_ops = gap->parallel_moves()[0]->move_operands();
|
||||
auto instr = LastInstruction(pred);
|
||||
auto move_ops = instr->parallel_moves()[0]->move_operands();
|
||||
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
|
||||
if (op->IsRedundant()) continue;
|
||||
MoveKey key = {*op->source(), *op->destination()};
|
||||
@@ -234,20 +221,20 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
   }
   // Compress.
   if (!gap_initialized) {
-    CompressMoves(&temp_vector_0(), gap->parallel_moves()[0],
-                  gap->parallel_moves()[1]);
+    CompressMoves(&temp_vector_0(), instr->parallel_moves()[0],
+                  instr->parallel_moves()[1]);
   }
 }


 // Split multiple loads of the same constant or stack slot off into the second
 // slot and keep remaining moves in the first slot.
-void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
+void MoveOptimizer::FinalizeMoves(Instruction* instr) {
   auto loads = temp_vector_0();
   DCHECK(loads.empty());
   auto new_moves = temp_vector_1();
   DCHECK(new_moves.empty());
-  auto move_ops = gap->parallel_moves()[0]->move_operands();
+  auto move_ops = instr->parallel_moves()[0]->move_operands();
   for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
     if (move->IsRedundant()) {
       move->Eliminate();
@@ -294,8 +281,8 @@ void MoveOptimizer::FinalizeMoves(GapInstruction* gap) {
   loads.clear();
   if (new_moves.empty()) return;
   // Insert all new moves into slot 1.
-  auto slot_1 = gap->GetOrCreateParallelMove(
-      static_cast<GapInstruction::InnerPosition>(1), code_zone());
+  auto slot_1 = instr->GetOrCreateParallelMove(
+      static_cast<Instruction::GapPosition>(1), code_zone());
   DCHECK(slot_1->move_operands()->is_empty());
   slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
                                     static_cast<int>(new_moves.size()),
@@ -19,7 +19,7 @@ class MoveOptimizer FINAL {

  private:
   typedef ZoneVector<MoveOperands*> MoveOpVector;
-  typedef ZoneVector<GapInstruction*> GapInstructions;
+  typedef ZoneVector<Instruction*> Instructions;

   InstructionSequence* code() const { return code_; }
   Zone* local_zone() const { return local_zone_; }
@@ -30,13 +30,13 @@ class MoveOptimizer FINAL {
   void CompressBlock(InstructionBlock* blocke);
   void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
                      ParallelMove* right);
-  GapInstruction* LastGap(InstructionBlock* block);
+  Instruction* LastInstruction(InstructionBlock* block);
   void OptimizeMerge(InstructionBlock* block);
-  void FinalizeMoves(GapInstruction* gap);
+  void FinalizeMoves(Instruction* instr);

   Zone* const local_zone_;
   InstructionSequence* const code_;
-  GapInstructions to_finalize_;
+  Instructions to_finalize_;
   MoveOpVector temp_vector_0_;
   MoveOpVector temp_vector_1_;
@@ -15,12 +15,12 @@ static size_t OperandCount(const Instruction* instr) {
 }


-static void VerifyGapEmpty(const GapInstruction* gap) {
-  for (int i = GapInstruction::FIRST_INNER_POSITION;
-       i <= GapInstruction::LAST_INNER_POSITION; i++) {
-    GapInstruction::InnerPosition inner_pos =
-        static_cast<GapInstruction::InnerPosition>(i);
-    CHECK(!gap->GetParallelMove(inner_pos));
+static void VerifyEmptyGaps(const Instruction* instr) {
+  for (int i = Instruction::FIRST_GAP_POSITION;
+       i <= Instruction::LAST_GAP_POSITION; i++) {
+    Instruction::GapPosition inner_pos =
+        static_cast<Instruction::GapPosition>(i);
+    CHECK(instr->GetParallelMove(inner_pos) == nullptr);
   }
 }
@@ -60,6 +60,8 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
   // Construct OperandConstraints for all InstructionOperands, eliminating
   // kSameAsFirst along the way.
   for (const auto* instr : sequence->instructions()) {
+    // All gaps should be totally unallocated at this point.
+    VerifyEmptyGaps(instr);
     const size_t operand_count = OperandCount(instr);
     auto* op_constraints = zone->NewArray<OperandConstraint>(operand_count);
     size_t count = 0;
@@ -80,11 +82,6 @@ RegisterAllocatorVerifier::RegisterAllocatorVerifier(
       }
       VerifyOutput(op_constraints[count]);
     }
-    // All gaps should be totally unallocated at this point.
-    if (instr->IsGapMoves()) {
-      CHECK(operand_count == 0);
-      VerifyGapEmpty(GapInstruction::cast(instr));
-    }
     InstructionConstraint instr_constraint = {instr, operand_count,
                                               op_constraints};
     constraints()->push_back(instr_constraint);
@@ -329,11 +326,11 @@ class OperandMap : public ZoneObject {
     map().insert(to_insert.begin(), to_insert.end());
   }

-  void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
-    for (int i = GapInstruction::FIRST_INNER_POSITION;
-         i <= GapInstruction::LAST_INNER_POSITION; i++) {
-      auto inner_pos = static_cast<GapInstruction::InnerPosition>(i);
-      auto move = gap->GetParallelMove(inner_pos);
+  void RunGaps(Zone* zone, const Instruction* instr) {
+    for (int i = Instruction::FIRST_GAP_POSITION;
+         i <= Instruction::LAST_GAP_POSITION; i++) {
+      auto inner_pos = static_cast<Instruction::GapPosition>(i);
+      auto move = instr->GetParallelMove(inner_pos);
       if (move == nullptr) continue;
       RunParallelMoves(zone, move);
     }
@@ -648,11 +645,7 @@ void RegisterAllocatorVerifier::VerifyGapMoves(BlockMaps* block_maps,
        ++instr_index) {
     const auto& instr_constraint = constraints_[instr_index];
     const auto instr = instr_constraint.instruction_;
     if (instr->IsSourcePosition()) continue;
-    if (instr->IsGapMoves()) {
-      current->RunGapInstruction(zone(), GapInstruction::cast(instr));
-      continue;
-    }
+    current->RunGaps(zone(), instr);
     const auto op_constraints = instr_constraint.operand_constraints_;
     size_t count = 0;
     for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
@@ -169,8 +169,8 @@ void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
   auto zone = sequence->zone();
   for (auto to_spill = spills_at_definition_; to_spill != nullptr;
        to_spill = to_spill->next) {
-    auto gap = sequence->GapAt(to_spill->gap_index);
-    auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
+    auto instr = sequence->InstructionAt(to_spill->gap_index);
+    auto move = instr->GetOrCreateParallelMove(Instruction::START, zone);
     // Skip insertion if it's possible that the move exists already as a
     // constraint move from a fixed output register to a slot.
     if (might_be_duplicated) {
@@ -264,8 +264,7 @@ bool LiveRange::CanBeSpilled(LifetimePosition pos) {
   // at the current or the immediate next position.
   auto use_pos = NextRegisterPosition(pos);
   if (use_pos == nullptr) return true;
-  return use_pos->pos().Value() >
-         pos.NextInstruction().InstructionEnd().Value();
+  return use_pos->pos().Value() > pos.NextStart().End().Value();
 }
@@ -685,10 +684,10 @@ void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
                                             BitVector* live_out) {
   // Add an interval that includes the entire block to the live range for
   // each live_out value.
-  auto start =
-      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
-  auto end = LifetimePosition::FromInstructionIndex(
-      block->last_instruction_index()).NextInstruction();
+  auto start = LifetimePosition::GapFromInstructionIndex(
+      block->first_instruction_index());
+  auto end = LifetimePosition::InstructionFromInstructionIndex(
+      block->last_instruction_index()).NextStart();
   BitVector::Iterator iterator(live_out);
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
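A hedged note on the new position idiom used above: instead of FromInstructionIndex/NextInstruction, intervals are now built from an explicit gap position at the block's first index up to the instruction position after its last index. A sketch using the same calls as the hunk (variable names illustrative):

// Sketch: a use interval covering a whole block under the new numbering.
auto start = LifetimePosition::GapFromInstructionIndex(
    block->first_instruction_index());
auto end = LifetimePosition::InstructionFromInstructionIndex(
    block->last_instruction_index()).NextStart();
range->AddUseInterval(start, end, zone);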
@@ -780,15 +779,17 @@ LiveRange* RegisterAllocator::LiveRangeFor(int index) {
 }


-GapInstruction* RegisterAllocator::GetLastGap(const InstructionBlock* block) {
-  int last_instruction = block->last_instruction_index();
-  return code()->GapAt(last_instruction - 1);
+Instruction* RegisterAllocator::GetLastInstruction(
+    const InstructionBlock* block) {
+  return code()->InstructionAt(block->last_instruction_index());
 }


 LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
   if (operand->IsUnallocated()) {
     return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
   } else if (operand->IsConstant()) {
     return LiveRangeFor(ConstantOperand::cast(operand)->index());
   } else if (operand->IsRegister()) {
     return FixedLiveRangeFor(operand->index());
   } else if (operand->IsDoubleRegister()) {
@@ -807,9 +808,8 @@ void RegisterAllocator::Define(LifetimePosition position,

   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
-    range->AddUseInterval(position, position.NextInstruction(), local_zone());
-    range->AddUsePosition(position.NextInstruction(), nullptr, nullptr,
-                          local_zone());
+    range->AddUseInterval(position, position.NextStart(), local_zone());
+    range->AddUsePosition(position.NextStart(), nullptr, nullptr, local_zone());
   } else {
     range->ShortenTo(position);
   }
@@ -835,12 +835,11 @@ void RegisterAllocator::Use(LifetimePosition block_start,
 }


-void RegisterAllocator::AddGapMove(int index,
-                                   GapInstruction::InnerPosition position,
+void RegisterAllocator::AddGapMove(int index, Instruction::GapPosition position,
                                    InstructionOperand* from,
                                    InstructionOperand* to) {
-  auto gap = code()->GapAt(index);
-  auto move = gap->GetOrCreateParallelMove(position, code_zone());
+  auto instr = code()->InstructionAt(index);
+  auto move = instr->GetOrCreateParallelMove(position, code_zone());
   move->AddMove(from, to, code_zone());
 }
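Note that callers of AddGapMove no longer compute a separate gap index (previously last_instruction_index() - 1); they name the instruction itself plus a position. A sketch of the phi-resolution call site after this change, taken from the ResolvePhis hunk later in this diff (`pred_block` is illustrative):

// Sketch: route a phi input through the END gap of the predecessor's
// last instruction instead of a dedicated GapInstruction before it.
AddGapMove(pred_block->last_instruction_index(), Instruction::END,
           &phi->inputs()[i], &output);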
@ -1024,8 +1023,8 @@ bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
|
||||
LiveRange* op_range = LiveRangeFor(op);
|
||||
if (op_range->GetSpillRange() == nullptr) continue;
|
||||
auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
|
||||
auto pred_end =
|
||||
LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
|
||||
auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
|
||||
pred->last_instruction_index());
|
||||
while (op_range != nullptr && !op_range->CanCover(pred_end)) {
|
||||
op_range = op_range->next();
|
||||
}
|
||||
@ -1068,9 +1067,7 @@ bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
|
||||
// If the range does not need register soon, spill it to the merged
|
||||
// spill range.
|
||||
auto next_pos = range->Start();
|
||||
if (code()->IsGapAt(next_pos.InstructionIndex())) {
|
||||
next_pos = next_pos.NextInstruction();
|
||||
}
|
||||
if (next_pos.IsGapPosition()) next_pos = next_pos.NextStart();
|
||||
auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
|
||||
if (pos == nullptr) {
|
||||
auto spill_range = range->TopLevel()->HasSpillRange()
|
||||
@ -1079,7 +1076,7 @@ bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
|
||||
CHECK(first_op_spill->TryMerge(spill_range));
|
||||
Spill(range);
|
||||
return true;
|
||||
} else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
|
||||
} else if (pos->pos().Value() > range->Start().NextStart().Value()) {
|
||||
auto spill_range = range->TopLevel()->HasSpillRange()
|
||||
? range->TopLevel()->GetSpillRange()
|
||||
: AssignSpillRangeToLiveRange(range->TopLevel());
|
||||
@ -1097,19 +1094,11 @@ void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
|
||||
int end = block->last_instruction_index();
|
||||
DCHECK_NE(-1, start);
|
||||
for (int i = start; i <= end; ++i) {
|
||||
if (code()->IsGapAt(i)) {
|
||||
Instruction* instr = nullptr;
|
||||
Instruction* prev_instr = nullptr;
|
||||
if (i < end) instr = InstructionAt(i + 1);
|
||||
if (i > start) prev_instr = InstructionAt(i - 1);
|
||||
MeetConstraintsBetween(prev_instr, instr, i);
|
||||
}
|
||||
MeetConstraintsBefore(i);
|
||||
if (i != end) MeetConstraintsAfter(i);
|
||||
}
|
||||
|
||||
// Meet register constraints for the instruction in the end.
|
||||
if (!code()->IsGapAt(end)) {
|
||||
MeetRegisterConstraintsForLastInstructionInBlock(block);
|
||||
}
|
||||
MeetRegisterConstraintsForLastInstructionInBlock(block);
|
||||
}
|
||||
|
||||
|
||||
@ -1138,14 +1127,12 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
|
||||
const InstructionBlock* successor = code()->InstructionBlockAt(succ);
|
||||
DCHECK(successor->PredecessorCount() == 1);
|
||||
int gap_index = successor->first_instruction_index();
|
||||
DCHECK(code()->IsGapAt(gap_index));
|
||||
|
||||
// Create an unconstrained operand for the same virtual register
|
||||
// and insert a gap move from the fixed output to the operand.
|
||||
UnallocatedOperand* output_copy =
|
||||
UnallocatedOperand(UnallocatedOperand::ANY, output_vreg)
|
||||
.Copy(code_zone());
|
||||
AddGapMove(gap_index, GapInstruction::START, output, output_copy);
|
||||
AddGapMove(gap_index, Instruction::START, output, output_copy);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1162,101 +1149,90 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
|
||||
}
|
||||
|
||||
|
||||
void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
|
||||
Instruction* second,
|
||||
int gap_index) {
|
||||
if (first != nullptr) {
|
||||
// Handle fixed temporaries.
|
||||
for (size_t i = 0; i < first->TempCount(); i++) {
|
||||
auto temp = UnallocatedOperand::cast(first->TempAt(i));
|
||||
if (temp->HasFixedPolicy()) {
|
||||
AllocateFixed(temp, gap_index - 1, false);
|
||||
}
|
||||
void RegisterAllocator::MeetConstraintsAfter(int instr_index) {
|
||||
auto first = InstructionAt(instr_index);
|
||||
// Handle fixed temporaries.
|
||||
for (size_t i = 0; i < first->TempCount(); i++) {
|
||||
auto temp = UnallocatedOperand::cast(first->TempAt(i));
|
||||
if (temp->HasFixedPolicy()) AllocateFixed(temp, instr_index, false);
|
||||
}
|
||||
// Handle constant/fixed output operands.
|
||||
for (size_t i = 0; i < first->OutputCount(); i++) {
|
||||
InstructionOperand* output = first->OutputAt(i);
|
||||
if (output->IsConstant()) {
|
||||
int output_vreg = output->index();
|
||||
auto range = LiveRangeFor(output_vreg);
|
||||
range->SetSpillStartIndex(instr_index + 1);
|
||||
range->SetSpillOperand(output);
|
||||
continue;
|
||||
}
|
||||
auto first_output = UnallocatedOperand::cast(output);
|
||||
auto range = LiveRangeFor(first_output->virtual_register());
|
||||
bool assigned = false;
|
||||
if (first_output->HasFixedPolicy()) {
|
||||
auto output_copy = first_output->CopyUnconstrained(code_zone());
|
||||
bool is_tagged = HasTaggedValue(first_output->virtual_register());
|
||||
AllocateFixed(first_output, instr_index, is_tagged);
|
||||
|
||||
// Handle constant/fixed output operands.
|
||||
for (size_t i = 0; i < first->OutputCount(); i++) {
|
||||
InstructionOperand* output = first->OutputAt(i);
|
||||
if (output->IsConstant()) {
|
||||
int output_vreg = output->index();
|
||||
auto range = LiveRangeFor(output_vreg);
|
||||
range->SetSpillStartIndex(gap_index - 1);
|
||||
range->SetSpillOperand(output);
|
||||
} else {
|
||||
auto first_output = UnallocatedOperand::cast(output);
|
||||
auto range = LiveRangeFor(first_output->virtual_register());
|
||||
bool assigned = false;
|
||||
if (first_output->HasFixedPolicy()) {
|
||||
auto output_copy = first_output->CopyUnconstrained(code_zone());
|
||||
bool is_tagged = HasTaggedValue(first_output->virtual_register());
|
||||
AllocateFixed(first_output, gap_index, is_tagged);
|
||||
|
||||
// This value is produced on the stack, we never need to spill it.
|
||||
if (first_output->IsStackSlot()) {
|
||||
DCHECK(first_output->index() < frame_->GetSpillSlotCount());
|
||||
range->SetSpillOperand(first_output);
|
||||
range->SetSpillStartIndex(gap_index - 1);
|
||||
assigned = true;
|
||||
}
|
||||
AddGapMove(gap_index, GapInstruction::START, first_output,
|
||||
output_copy);
|
||||
}
|
||||
|
||||
// Make sure we add a gap move for spilling (if we have not done
|
||||
// so already).
|
||||
if (!assigned) {
|
||||
range->SpillAtDefinition(local_zone(), gap_index, first_output);
|
||||
range->SetSpillStartIndex(gap_index);
|
||||
}
|
||||
// This value is produced on the stack, we never need to spill it.
|
||||
if (first_output->IsStackSlot()) {
|
||||
DCHECK(first_output->index() < frame_->GetSpillSlotCount());
|
||||
range->SetSpillOperand(first_output);
|
||||
range->SetSpillStartIndex(instr_index + 1);
|
||||
assigned = true;
|
||||
}
|
||||
AddGapMove(instr_index + 1, Instruction::START, first_output,
|
||||
output_copy);
|
||||
}
|
||||
// Make sure we add a gap move for spilling (if we have not done
|
||||
// so already).
|
||||
if (!assigned) {
|
||||
range->SpillAtDefinition(local_zone(), instr_index + 1, first_output);
|
||||
range->SetSpillStartIndex(instr_index + 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (second != nullptr) {
|
||||
// Handle fixed input operands of second instruction.
|
||||
for (size_t i = 0; i < second->InputCount(); i++) {
|
||||
auto input = second->InputAt(i);
|
||||
if (input->IsImmediate()) continue; // Ignore immediates.
|
||||
auto cur_input = UnallocatedOperand::cast(input);
|
||||
if (cur_input->HasFixedPolicy()) {
|
||||
auto input_copy = cur_input->CopyUnconstrained(code_zone());
|
||||
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
|
||||
AllocateFixed(cur_input, gap_index + 1, is_tagged);
|
||||
AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
|
||||
}
|
||||
|
||||
void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
|
||||
auto second = InstructionAt(instr_index);
|
||||
// Handle fixed input operands of second instruction.
|
||||
for (size_t i = 0; i < second->InputCount(); i++) {
|
||||
auto input = second->InputAt(i);
|
||||
if (input->IsImmediate()) continue; // Ignore immediates.
|
||||
auto cur_input = UnallocatedOperand::cast(input);
|
||||
if (cur_input->HasFixedPolicy()) {
|
||||
auto input_copy = cur_input->CopyUnconstrained(code_zone());
|
||||
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
|
||||
AllocateFixed(cur_input, instr_index, is_tagged);
|
||||
AddGapMove(instr_index, Instruction::END, input_copy, cur_input);
|
||||
}
|
||||
|
||||
// Handle "output same as input" for second instruction.
|
||||
for (size_t i = 0; i < second->OutputCount(); i++) {
|
||||
auto output = second->OutputAt(i);
|
||||
if (!output->IsUnallocated()) continue;
|
||||
auto second_output = UnallocatedOperand::cast(output);
|
||||
if (second_output->HasSameAsInputPolicy()) {
|
||||
DCHECK(i == 0); // Only valid for first output.
|
||||
UnallocatedOperand* cur_input =
|
||||
UnallocatedOperand::cast(second->InputAt(0));
|
||||
int output_vreg = second_output->virtual_register();
|
||||
int input_vreg = cur_input->virtual_register();
|
||||
|
||||
auto input_copy = cur_input->CopyUnconstrained(code_zone());
|
||||
cur_input->set_virtual_register(second_output->virtual_register());
|
||||
AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
|
||||
|
||||
if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
|
||||
int index = gap_index + 1;
|
||||
Instruction* instr = InstructionAt(index);
|
||||
if (instr->HasPointerMap()) {
|
||||
instr->pointer_map()->RecordPointer(input_copy, code_zone());
|
||||
}
|
||||
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
|
||||
// The input is assumed to immediately have a tagged representation,
|
||||
// before the pointer map can be used. I.e. the pointer map at the
|
||||
// instruction will include the output operand (whose value at the
|
||||
// beginning of the instruction is equal to the input operand). If
|
||||
// this is not desired, then the pointer map at this instruction needs
|
||||
// to be adjusted manually.
|
||||
}
|
||||
}
|
||||
// Handle "output same as input" for second instruction.
|
||||
for (size_t i = 0; i < second->OutputCount(); i++) {
|
||||
auto output = second->OutputAt(i);
|
||||
if (!output->IsUnallocated()) continue;
|
||||
auto second_output = UnallocatedOperand::cast(output);
|
||||
if (!second_output->HasSameAsInputPolicy()) continue;
|
||||
DCHECK(i == 0); // Only valid for first output.
|
||||
UnallocatedOperand* cur_input =
|
||||
UnallocatedOperand::cast(second->InputAt(0));
|
||||
int output_vreg = second_output->virtual_register();
|
||||
int input_vreg = cur_input->virtual_register();
|
||||
auto input_copy = cur_input->CopyUnconstrained(code_zone());
|
||||
cur_input->set_virtual_register(second_output->virtual_register());
|
||||
AddGapMove(instr_index, Instruction::END, input_copy, cur_input);
|
||||
if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
|
||||
if (second->HasPointerMap()) {
|
||||
second->pointer_map()->RecordPointer(input_copy, code_zone());
|
||||
}
|
||||
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
|
||||
// The input is assumed to immediately have a tagged representation,
|
||||
// before the pointer map can be used. I.e. the pointer map at the
|
||||
// instruction will include the output operand (whose value at the
|
||||
// beginning of the instruction is equal to the input operand). If
|
||||
// this is not desired, then the pointer map at this instruction needs
|
||||
// to be adjusted manually.
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1285,132 +1261,132 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
|
||||
BitVector* live) {
|
||||
int block_start = block->first_instruction_index();
|
||||
auto block_start_position =
|
||||
LifetimePosition::FromInstructionIndex(block_start);
|
||||
LifetimePosition::GapFromInstructionIndex(block_start);
|
||||
|
||||
for (int index = block->last_instruction_index(); index >= block_start;
|
||||
index--) {
|
||||
auto curr_position = LifetimePosition::FromInstructionIndex(index);
|
||||
auto curr_position =
|
||||
LifetimePosition::InstructionFromInstructionIndex(index);
|
||||
auto instr = InstructionAt(index);
|
||||
DCHECK(instr != nullptr);
|
||||
if (instr->IsGapMoves()) {
|
||||
// Process the moves of the gap instruction, making their sources live.
|
||||
auto gap = code()->GapAt(index);
|
||||
const GapInstruction::InnerPosition kPositions[] = {
|
||||
GapInstruction::END, GapInstruction::START};
|
||||
for (auto position : kPositions) {
|
||||
auto move = gap->GetParallelMove(position);
|
||||
if (move == nullptr) continue;
|
||||
if (position == GapInstruction::END) {
|
||||
curr_position = curr_position.InstructionEnd();
|
||||
} else {
|
||||
curr_position = curr_position.InstructionStart();
|
||||
DCHECK(curr_position.IsInstructionPosition());
|
||||
// Process output, inputs, and temps of this instruction.
|
||||
for (size_t i = 0; i < instr->OutputCount(); i++) {
|
||||
auto output = instr->OutputAt(i);
|
||||
if (output->IsUnallocated()) {
|
||||
// Unsupported.
|
||||
DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
|
||||
int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
|
||||
live->Remove(out_vreg);
|
||||
} else if (output->IsConstant()) {
|
||||
int out_vreg = output->index();
|
||||
live->Remove(out_vreg);
|
||||
}
|
||||
Define(curr_position, output, nullptr);
|
||||
}
|
||||
|
||||
if (instr->ClobbersRegisters()) {
|
||||
for (int i = 0; i < config()->num_general_registers(); ++i) {
|
||||
if (!IsOutputRegisterOf(instr, i)) {
|
||||
auto range = FixedLiveRangeFor(i);
|
||||
range->AddUseInterval(curr_position, curr_position.End(),
|
||||
local_zone());
|
||||
}
|
||||
auto move_ops = move->move_operands();
|
||||
for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
|
||||
auto from = cur->source();
|
||||
auto to = cur->destination();
|
||||
auto hint = to;
|
||||
if (to->IsUnallocated()) {
|
||||
int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
|
||||
auto to_range = LiveRangeFor(to_vreg);
|
||||
if (to_range->is_phi()) {
|
||||
DCHECK(!FLAG_turbo_delay_ssa_decon);
|
||||
if (to_range->is_non_loop_phi()) {
|
||||
hint = to_range->current_hint_operand();
|
||||
}
|
||||
} else {
|
||||
if (live->Contains(to_vreg)) {
|
||||
Define(curr_position, to, from);
|
||||
live->Remove(to_vreg);
|
||||
} else {
|
||||
cur->Eliminate();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (instr->ClobbersDoubleRegisters()) {
|
||||
for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
|
||||
if (!IsOutputDoubleRegisterOf(instr, i)) {
|
||||
auto range = FixedDoubleLiveRangeFor(i);
|
||||
range->AddUseInterval(curr_position, curr_position.End(),
|
||||
local_zone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < instr->InputCount(); i++) {
|
||||
auto input = instr->InputAt(i);
|
||||
if (input->IsImmediate()) continue; // Ignore immediates.
|
||||
LifetimePosition use_pos;
|
||||
if (input->IsUnallocated() &&
|
||||
UnallocatedOperand::cast(input)->IsUsedAtStart()) {
|
||||
use_pos = curr_position;
|
||||
} else {
|
||||
use_pos = curr_position.End();
|
||||
}
|
||||
|
||||
if (input->IsUnallocated()) {
|
||||
UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
|
||||
int vreg = unalloc->virtual_register();
|
||||
live->Add(vreg);
|
||||
if (unalloc->HasSlotPolicy()) {
|
||||
LiveRangeFor(vreg)->set_has_slot_use(true);
|
||||
}
|
||||
}
|
||||
Use(block_start_position, use_pos, input, nullptr);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < instr->TempCount(); i++) {
|
||||
auto temp = instr->TempAt(i);
|
||||
// Unsupported.
|
||||
DCHECK_IMPLIES(temp->IsUnallocated(),
|
||||
!UnallocatedOperand::cast(temp)->HasSlotPolicy());
|
||||
if (instr->ClobbersTemps()) {
|
||||
if (temp->IsRegister()) continue;
|
||||
if (temp->IsUnallocated()) {
|
||||
UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
|
||||
if (temp_unalloc->HasFixedPolicy()) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
Use(block_start_position, curr_position.End(), temp, nullptr);
|
||||
Define(curr_position, temp, nullptr);
|
||||
}
|
||||
|
||||
// Process the moves of the instruction's gaps, making their sources live.
|
||||
const Instruction::GapPosition kPositions[] = {Instruction::END,
|
||||
Instruction::START};
|
||||
curr_position = curr_position.PrevStart();
|
||||
DCHECK(curr_position.IsGapPosition());
|
||||
for (auto position : kPositions) {
|
||||
auto move = instr->GetParallelMove(position);
|
||||
if (move == nullptr) continue;
|
||||
if (position == Instruction::END) {
|
||||
curr_position = curr_position.End();
|
||||
} else {
|
||||
curr_position = curr_position.Start();
|
||||
}
|
||||
auto move_ops = move->move_operands();
|
||||
for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
|
||||
auto from = cur->source();
|
||||
auto to = cur->destination();
|
||||
auto hint = to;
|
||||
if (to->IsUnallocated()) {
|
||||
int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
|
||||
auto to_range = LiveRangeFor(to_vreg);
|
||||
if (to_range->is_phi()) {
|
||||
DCHECK(!FLAG_turbo_delay_ssa_decon);
|
||||
if (to_range->is_non_loop_phi()) {
|
||||
hint = to_range->current_hint_operand();
|
||||
}
|
||||
} else {
|
||||
Define(curr_position, to, from);
|
||||
}
|
||||
Use(block_start_position, curr_position, from, hint);
|
||||
if (from->IsUnallocated()) {
|
||||
live->Add(UnallocatedOperand::cast(from)->virtual_register());
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Process output, inputs, and temps of this non-gap instruction.
|
||||
for (size_t i = 0; i < instr->OutputCount(); i++) {
|
||||
auto output = instr->OutputAt(i);
|
||||
if (output->IsUnallocated()) {
|
||||
// Unsupported.
|
||||
DCHECK(!UnallocatedOperand::cast(output)->HasSlotPolicy());
|
||||
int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
|
||||
live->Remove(out_vreg);
|
||||
} else if (output->IsConstant()) {
|
||||
int out_vreg = output->index();
|
||||
live->Remove(out_vreg);
|
||||
}
|
||||
Define(curr_position, output, nullptr);
|
||||
}
|
||||
|
||||
if (instr->ClobbersRegisters()) {
|
||||
for (int i = 0; i < config()->num_general_registers(); ++i) {
|
||||
if (!IsOutputRegisterOf(instr, i)) {
|
||||
auto range = FixedLiveRangeFor(i);
|
||||
range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
|
||||
local_zone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (instr->ClobbersDoubleRegisters()) {
|
||||
for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
|
||||
if (!IsOutputDoubleRegisterOf(instr, i)) {
|
||||
auto range = FixedDoubleLiveRangeFor(i);
|
||||
range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
|
||||
local_zone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < instr->InputCount(); i++) {
|
||||
auto input = instr->InputAt(i);
|
||||
if (input->IsImmediate()) continue; // Ignore immediates.
|
||||
LifetimePosition use_pos;
|
||||
if (input->IsUnallocated() &&
|
||||
UnallocatedOperand::cast(input)->IsUsedAtStart()) {
|
||||
use_pos = curr_position;
|
||||
} else {
|
||||
use_pos = curr_position.InstructionEnd();
|
||||
}
|
||||
|
||||
if (input->IsUnallocated()) {
|
||||
UnallocatedOperand* unalloc = UnallocatedOperand::cast(input);
|
||||
int vreg = unalloc->virtual_register();
|
||||
live->Add(vreg);
|
||||
if (unalloc->HasSlotPolicy()) {
|
||||
LiveRangeFor(vreg)->set_has_slot_use(true);
|
||||
}
|
||||
}
|
||||
Use(block_start_position, use_pos, input, nullptr);
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < instr->TempCount(); i++) {
|
||||
auto temp = instr->TempAt(i);
|
||||
// Unsupported.
|
||||
DCHECK_IMPLIES(temp->IsUnallocated(),
|
||||
!UnallocatedOperand::cast(temp)->HasSlotPolicy());
|
||||
if (instr->ClobbersTemps()) {
|
||||
if (temp->IsRegister()) continue;
|
||||
if (temp->IsUnallocated()) {
|
||||
UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
|
||||
if (temp_unalloc->HasFixedPolicy()) {
|
||||
if (live->Contains(to_vreg)) {
|
||||
Define(curr_position, to, from);
|
||||
live->Remove(to_vreg);
|
||||
} else {
|
||||
cur->Eliminate();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Define(curr_position, to, from);
|
||||
}
|
||||
Use(block_start_position, curr_position, from, hint);
|
||||
if (from->IsUnallocated()) {
|
||||
live->Add(UnallocatedOperand::cast(from)->virtual_register());
|
||||
}
|
||||
Use(block_start_position, curr_position.InstructionEnd(), temp,
|
||||
nullptr);
|
||||
Define(curr_position, temp, nullptr);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1429,7 +1405,7 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
|
||||
for (size_t i = 0; i < phi->operands().size(); ++i) {
|
||||
InstructionBlock* cur_block =
|
||||
code()->InstructionBlockAt(block->predecessors()[i]);
|
||||
AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
|
||||
AddGapMove(cur_block->last_instruction_index(), Instruction::END,
|
||||
&phi->inputs()[i], &output);
|
||||
DCHECK(!InstructionAt(cur_block->last_instruction_index())
|
||||
->HasPointerMap());
|
||||
@ -1464,7 +1440,7 @@ void RegisterAllocator::ResolvePhis() {
|
||||
|
||||
const InstructionBlock* RegisterAllocator::GetInstructionBlock(
|
||||
LifetimePosition pos) {
|
||||
return code()->GetInstructionBlock(pos.InstructionIndex());
|
||||
return code()->GetInstructionBlock(pos.ToInstructionIndex());
|
||||
}
|
||||
|
||||
|
||||
@ -1487,19 +1463,20 @@ void RegisterAllocator::ConnectRanges() {
|
||||
auto prev_operand = first_range->GetAssignedOperand(operand_cache());
|
||||
auto cur_operand = second_range->GetAssignedOperand(operand_cache());
|
||||
if (prev_operand->Equals(cur_operand)) continue;
|
||||
int index = pos.InstructionIndex();
|
||||
bool delay_insertion = false;
|
||||
GapInstruction::InnerPosition gap_pos;
|
||||
int gap_index = index;
|
||||
if (code()->IsGapAt(index)) {
|
||||
gap_pos = pos.IsInstructionStart() ? GapInstruction::START
|
||||
: GapInstruction::END;
|
||||
Instruction::GapPosition gap_pos;
|
||||
int gap_index = pos.ToInstructionIndex();
|
||||
if (pos.IsGapPosition()) {
|
||||
gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
|
||||
} else {
|
||||
gap_index = pos.IsInstructionStart() ? (index - 1) : (index + 1);
|
||||
delay_insertion = gap_index < index;
|
||||
gap_pos = delay_insertion ? GapInstruction::END : GapInstruction::START;
|
||||
if (pos.IsStart()) {
|
||||
delay_insertion = true;
|
||||
} else {
|
||||
gap_index++;
|
||||
}
|
||||
gap_pos = delay_insertion ? Instruction::END : Instruction::START;
|
||||
}
|
||||
auto move = code()->GapAt(gap_index)->GetOrCreateParallelMove(
|
||||
auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
|
||||
gap_pos, code_zone());
|
||||
if (!delay_insertion) {
|
||||
move->AddMove(prev_operand, cur_operand, code_zone());
|
||||
@ -1612,24 +1589,24 @@ class LiveRangeBoundArray {
|
||||
}
|
||||
|
||||
LiveRangeBound* FindPred(const InstructionBlock* pred) {
|
||||
auto pred_end =
|
||||
LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
|
||||
auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
|
||||
pred->last_instruction_index());
|
||||
return Find(pred_end);
|
||||
}
|
||||
|
||||
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
|
||||
auto succ_start =
|
||||
LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
|
||||
auto succ_start = LifetimePosition::GapFromInstructionIndex(
|
||||
succ->first_instruction_index());
|
||||
return Find(succ_start);
|
||||
}
|
||||
|
||||
void Find(const InstructionBlock* block, const InstructionBlock* pred,
|
||||
FindResult* result) const {
|
||||
auto pred_end =
|
||||
LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
|
||||
auto pred_end = LifetimePosition::InstructionFromInstructionIndex(
|
||||
pred->last_instruction_index());
|
||||
auto bound = Find(pred_end);
|
||||
result->pred_cover_ = bound->range_;
|
||||
auto cur_start = LifetimePosition::FromInstructionIndex(
|
||||
auto cur_start = LifetimePosition::GapFromInstructionIndex(
|
||||
block->first_instruction_index());
|
||||
// Common case.
|
||||
if (bound->CanCover(cur_start)) {
|
||||
@ -1735,15 +1712,15 @@ void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
InstructionOperand* pred_op) {
if (pred_op->Equals(cur_op)) return;
int gap_index;
GapInstruction::InnerPosition position;
Instruction::GapPosition position;
if (block->PredecessorCount() == 1) {
gap_index = block->first_instruction_index();
position = GapInstruction::START;
position = Instruction::START;
} else {
DCHECK(pred->SuccessorCount() == 1);
DCHECK(!InstructionAt(pred->last_instruction_index())->HasPointerMap());
gap_index = pred->last_instruction_index() - 1;
position = GapInstruction::END;
gap_index = pred->last_instruction_index();
position = Instruction::END;
}
AddGapMove(gap_index, position, pred_op, cur_op);
}
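AddGapMove now takes an Instruction::GapPosition (the declaration change is in the register-allocator.h hunk further down). Its body is not shown in this excerpt; a plausible shape under the merged design, using only calls that appear elsewhere in the patch, would be:

// Sketch only -- assumed implementation, not taken from this diff.
void RegisterAllocator::AddGapMove(int index, Instruction::GapPosition position,
                                   InstructionOperand* from,
                                   InstructionOperand* to) {
  auto instr = code()->InstructionAt(index);
  auto move = instr->GetOrCreateParallelMove(position, code_zone());
  move->AddMove(from, to, code_zone());
}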
@ -1771,10 +1748,9 @@ void RegisterAllocator::BuildLiveRanges() {
if (!FLAG_turbo_delay_ssa_decon) {
InstructionOperand* hint = nullptr;
InstructionOperand* phi_operand = nullptr;
auto gap =
GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
auto move =
gap->GetOrCreateParallelMove(GapInstruction::END, code_zone());
auto instr = GetLastInstruction(
code()->InstructionBlockAt(block->predecessors()[0]));
auto move = instr->GetParallelMove(Instruction::END);
for (int j = 0; j < move->move_operands()->length(); ++j) {
auto to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() &&
@ -1785,7 +1761,7 @@ void RegisterAllocator::BuildLiveRanges() {
}
}
DCHECK(hint != nullptr);
auto block_start = LifetimePosition::FromInstructionIndex(
auto block_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
Define(block_start, phi_operand, hint);
}
@ -1799,10 +1775,10 @@ void RegisterAllocator::BuildLiveRanges() {
// Add a live range stretching from the first loop instruction to the last
// for each value live on entry to the header.
BitVector::Iterator iterator(live);
auto start = LifetimePosition::FromInstructionIndex(
auto start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
auto end = LifetimePosition::FromInstructionIndex(
code()->LastLoopInstructionIndex(block)).NextInstruction();
auto end = LifetimePosition::GapFromInstructionIndex(
code()->LastLoopInstructionIndex(block)).NextFullStart();
while (!iterator.Done()) {
int operand_index = iterator.Current();
auto range = LiveRangeFor(operand_index);
@ -1833,9 +1809,7 @@ void RegisterAllocator::BuildLiveRanges() {
if (pos->type() == UsePositionType::kRequiresSlot) continue;
UsePositionType new_type = UsePositionType::kAny;
// Can't mark phis as needing a register.
if (!code()
->InstructionAt(pos->pos().InstructionIndex())
->IsGapMoves()) {
if (!pos->pos().IsGapPosition()) {
new_type = UsePositionType::kRequiresRegister;
}
pos->set_type(new_type, true);
@ -1894,12 +1868,13 @@ void RegisterAllocator::PopulatePointerMaps() {
if (range->IsEmpty()) continue;

// Find the extent of the range and its children.
int start = range->Start().InstructionIndex();
int start = range->Start().ToInstructionIndex();
int end = 0;
for (auto cur = range; cur != nullptr; cur = cur->next()) {
auto this_end = cur->End();
if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
DCHECK(cur->Start().InstructionIndex() >= start);
if (this_end.ToInstructionIndex() > end)
end = this_end.ToInstructionIndex();
DCHECK(cur->Start().ToInstructionIndex() >= start);
}

// Most of the ranges are in order, but not all. Keep an eye on when they
@ -1924,7 +1899,8 @@ void RegisterAllocator::PopulatePointerMaps() {

// Advance to the next active range that covers the current
// safe point position.
auto safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point);
auto safe_point_pos =
LifetimePosition::InstructionFromInstructionIndex(safe_point);
auto cur = range;
while (cur != nullptr && !cur->Covers(safe_point_pos)) {
cur = cur->next();
@ -2014,8 +1990,8 @@ void RegisterAllocator::AllocateRegisters() {
if (!current->HasNoSpillType()) {
TRACE("Live range %d already has a spill operand\n", current->id());
auto next_pos = position;
if (code()->IsGapAt(next_pos.InstructionIndex())) {
next_pos = next_pos.NextInstruction();
if (next_pos.IsGapPosition()) {
next_pos = next_pos.NextStart();
}
auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
// If the range already has a spill operand and it doesn't need a
@ -2023,8 +1999,7 @@ void RegisterAllocator::AllocateRegisters() {
if (pos == nullptr) {
Spill(current);
continue;
} else if (pos->pos().Value() >
current->Start().NextInstruction().Value()) {
} else if (pos->pos().Value() > current->Start().NextStart().Value()) {
// Do not spill live range eagerly if use position that can benefit from
// the register is too close to the start of live range.
SpillBetween(current, current->Start(), pos->pos());
@ -2196,7 +2171,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {

for (auto cur_active : active_live_ranges()) {
free_until_pos[cur_active->assigned_register()] =
LifetimePosition::FromInstructionIndex(0);
LifetimePosition::GapFromInstructionIndex(0);
}

for (auto cur_inactive : inactive_live_ranges()) {
@ -2276,7 +2251,7 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
int cur_reg = range->assigned_register();
if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::FromInstructionIndex(0);
LifetimePosition::GapFromInstructionIndex(0);
} else {
auto next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
@ -2320,8 +2295,8 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
if (block_pos[reg].Value() < current->End().Value()) {
// Register becomes blocked before the current range end. Split before that
// position.
LiveRange* tail = SplitBetween(current, current->Start(),
block_pos[reg].InstructionStart());
LiveRange* tail =
SplitBetween(current, current->Start(), block_pos[reg].Start());
AddToUnhandledSorted(tail);
}

@ -2348,7 +2323,7 @@ static const InstructionBlock* GetContainingLoop(

LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
LiveRange* range, LifetimePosition pos) {
auto block = GetInstructionBlock(pos.InstructionStart());
auto block = GetInstructionBlock(pos.Start());
auto loop_header =
block->IsLoopHeader() ? block : GetContainingLoop(code(), block);

@ -2360,7 +2335,7 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
// We are going to spill live range inside the loop.
// If possible try to move spilling position backwards to loop header.
// This will reduce number of memory moves on the back edge.
auto loop_start = LifetimePosition::FromInstructionIndex(
auto loop_start = LifetimePosition::GapFromInstructionIndex(
loop_header->first_instruction_index());

if (range->Covers(loop_start)) {
@ -2427,9 +2402,9 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {


bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
return pos.IsInstructionStart() &&
code()->GetInstructionBlock(pos.InstructionIndex())->code_start() ==
pos.InstructionIndex();
return pos.IsFullStart() &&
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
pos.ToInstructionIndex();
}

@ -2442,9 +2417,9 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,

// We can't properly connect liveranges if splitting occurred at the end
// a block.
DCHECK(pos.IsInstructionStart() ||
(code()->GetInstructionBlock(pos.InstructionIndex()))
->last_instruction_index() != pos.InstructionIndex());
DCHECK(pos.IsStart() || pos.IsGapPosition() ||
(code()->GetInstructionBlock(pos.ToInstructionIndex()))
->last_instruction_index() != pos.ToInstructionIndex());

int vreg = GetVirtualRegister();
auto result = LiveRangeFor(vreg);
@ -2468,8 +2443,8 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,

LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
LifetimePosition end) {
int start_instr = start.InstructionIndex();
int end_instr = end.InstructionIndex();
int start_instr = start.ToInstructionIndex();
int end_instr = end.ToInstructionIndex();
DCHECK(start_instr <= end_instr);

// We have no choice
@ -2497,7 +2472,7 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
// position unless end_block is a loop header itself.
if (block == end_block && !end_block->IsLoopHeader()) return end;

return LifetimePosition::FromInstructionIndex(
return LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
}

@ -2525,13 +2500,12 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
// The split result intersects with [start, end[.
// Split it at position between ]start+1, end[, spill the middle part
// and put the rest to unhandled.
auto third_part_end = end.PrevInstruction().InstructionEnd();
if (IsBlockBoundary(end.InstructionStart())) {
third_part_end = end.InstructionStart();
auto third_part_end = end.PrevStart().End();
if (IsBlockBoundary(end.Start())) {
third_part_end = end.Start();
}
auto third_part = SplitBetween(
second_part, Max(second_part->Start().InstructionEnd(), until),
third_part_end);
second_part, Max(second_part->Start().End(), until), third_part_end);

DCHECK(third_part != second_part);

@ -20,57 +20,84 @@ enum RegisterKind {


// This class represents a single point of a InstructionOperand's lifetime. For
// each instruction there are exactly two lifetime positions: the beginning and
// the end of the instruction. Lifetime positions for different instructions are
// disjoint.
// each instruction there are four lifetime positions:
//
// [[START, END], [START, END]]
//
// Where the first half position corresponds to
//
// [GapPosition::START, GapPosition::END]
//
// and the second half position corresponds to
//
// [Lifetime::USED_AT_START, Lifetime::USED_AT_END]
//
class LifetimePosition FINAL {
public:
// Return the lifetime position that corresponds to the beginning of
// the instruction with the given index.
static LifetimePosition FromInstructionIndex(int index) {
// the gap with the given index.
static LifetimePosition GapFromInstructionIndex(int index) {
return LifetimePosition(index * kStep);
}
// Return the lifetime position that corresponds to the beginning of
// the instruction with the given index.
static LifetimePosition InstructionFromInstructionIndex(int index) {
return LifetimePosition(index * kStep + kHalfStep);
}

// Returns a numeric representation of this lifetime position.
int Value() const { return value_; }

// Returns the index of the instruction to which this lifetime position
// corresponds.
int InstructionIndex() const {
int ToInstructionIndex() const {
DCHECK(IsValid());
return value_ / kStep;
}

// Returns true if this lifetime position corresponds to the instruction
// start.
bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
// Returns true if this lifetime position corresponds to a START value
bool IsStart() const { return (value_ & (kHalfStep - 1)) == 0; }
// Returns true if this lifetime position corresponds to a gap START value
bool IsFullStart() const { return (value_ & (kStep - 1)) == 0; }

// Returns the lifetime position for the start of the instruction which
// corresponds to this lifetime position.
LifetimePosition InstructionStart() const {
bool IsGapPosition() { return (value_ & 0x2) == 0; }
bool IsInstructionPosition() { return !IsGapPosition(); }

// Returns the lifetime position for the current START.
LifetimePosition Start() const {
DCHECK(IsValid());
return LifetimePosition(value_ & ~(kHalfStep - 1));
}

// Returns the lifetime position for the current gap START.
LifetimePosition FullStart() const {
DCHECK(IsValid());
return LifetimePosition(value_ & ~(kStep - 1));
}

// Returns the lifetime position for the end of the instruction which
// corresponds to this lifetime position.
LifetimePosition InstructionEnd() const {
// Returns the lifetime position for the current END.
LifetimePosition End() const {
DCHECK(IsValid());
return LifetimePosition(InstructionStart().Value() + kStep / 2);
return LifetimePosition(Start().Value() + kHalfStep / 2);
}

// Returns the lifetime position for the beginning of the next instruction.
LifetimePosition NextInstruction() const {
// Returns the lifetime position for the beginning of the next START.
LifetimePosition NextStart() const {
DCHECK(IsValid());
return LifetimePosition(InstructionStart().Value() + kStep);
return LifetimePosition(Start().Value() + kHalfStep);
}

// Returns the lifetime position for the beginning of the previous
// instruction.
LifetimePosition PrevInstruction() const {
// Returns the lifetime position for the beginning of the next gap START.
LifetimePosition NextFullStart() const {
DCHECK(IsValid());
DCHECK(value_ > 1);
return LifetimePosition(InstructionStart().Value() - kStep);
return LifetimePosition(FullStart().Value() + kStep);
}

// Returns the lifetime position for the beginning of the previous START.
LifetimePosition PrevStart() const {
DCHECK(IsValid());
DCHECK(value_ >= kHalfStep);
return LifetimePosition(Start().Value() - kHalfStep);
}

// Constructs the lifetime position which does not correspond to any
@ -90,10 +117,11 @@ class LifetimePosition FINAL {
}

private:
static const int kStep = 2;
static const int kHalfStep = 2;
static const int kStep = 2 * kHalfStep;

// Code relies on kStep being a power of two.
STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
// Code relies on kStep and kHalfStep being a power of two.
STATIC_ASSERT(IS_POWER_OF_TWO(kHalfStep));

explicit LifetimePosition(int value) : value_(value) {}

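A quick numeric sanity check of the new encoding (kHalfStep == 2, kStep == 4): instruction index 3 owns the four consecutive values 12..15, the lower pair for its gap and the upper pair for the instruction itself. The snippet below is illustrative only, not test code from the patch.

LifetimePosition g = LifetimePosition::GapFromInstructionIndex(3);          // value 12
LifetimePosition i = LifetimePosition::InstructionFromInstructionIndex(3);  // value 14
DCHECK(g.IsGapPosition() && g.IsStart() && g.IsFullStart());
DCHECK(i.IsInstructionPosition() && i.IsStart() && !i.IsFullStart());
DCHECK_EQ(13, g.End().Value());            // gap END
DCHECK_EQ(15, i.End().Value());            // instruction END
DCHECK_EQ(14, g.NextStart().Value());      // next START is the instruction half
DCHECK_EQ(16, i.NextFullStart().Value());  // gap START of instruction 4
DCHECK_EQ(3, i.ToInstructionIndex());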
@ -495,8 +523,8 @@ class RegisterAllocator FINAL : public ZoneObject {
bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
void ProcessInstructions(const InstructionBlock* block, BitVector* live);
void MeetRegisterConstraints(const InstructionBlock* block);
void MeetConstraintsBetween(Instruction* first, Instruction* second,
int gap_index);
void MeetConstraintsBefore(int index);
void MeetConstraintsAfter(int index);
void MeetRegisterConstraintsForLastInstructionInBlock(
const InstructionBlock* block);
void ResolvePhis(const InstructionBlock* block);
@ -509,7 +537,7 @@ class RegisterAllocator FINAL : public ZoneObject {
InstructionOperand* hint);
void Use(LifetimePosition block_start, LifetimePosition position,
InstructionOperand* operand, InstructionOperand* hint);
void AddGapMove(int index, GapInstruction::InnerPosition position,
void AddGapMove(int index, Instruction::GapPosition position,
InstructionOperand* from, InstructionOperand* to);

// Helper methods for updating the life range lists.
@ -590,7 +618,7 @@ class RegisterAllocator FINAL : public ZoneObject {
LiveRange* FixedLiveRangeFor(int index);
LiveRange* FixedDoubleLiveRangeFor(int index);
LiveRange* LiveRangeFor(int index);
GapInstruction* GetLastGap(const InstructionBlock* block);
Instruction* GetLastInstruction(const InstructionBlock* block);

const char* RegisterName(int allocation_index);

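GetLastGap is gone and GetLastInstruction takes its place. Its body is not included in this excerpt; given the accessors used elsewhere in the patch it plausibly reduces to:

// Sketch only -- assumed implementation, not taken from this diff.
Instruction* RegisterAllocator::GetLastInstruction(const InstructionBlock* block) {
  return code()->InstructionAt(block->last_instruction_index());
}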
@ -206,10 +206,7 @@ TEST(InstructionIsGapAt) {
R.code->AddInstruction(g);
R.code->EndBlock(R.RpoFor(b0));

CHECK(R.code->instructions().size() == 4);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
}
CHECK(R.code->instructions().size() == 2);
}


@ -236,10 +233,7 @@ TEST(InstructionIsGapAt2) {
R.code->AddInstruction(g1);
R.code->EndBlock(R.RpoFor(b1));

CHECK(R.code->instructions().size() == 8);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
}
CHECK(R.code->instructions().size() == 4);
}


@ -257,21 +251,15 @@ TEST(InstructionAddGapMove) {
R.code->AddInstruction(g);
R.code->EndBlock(R.RpoFor(b0));

CHECK(R.code->instructions().size() == 4);
for (size_t i = 0; i < R.code->instructions().size(); ++i) {
CHECK_EQ(i % 2 == 0, R.code->instructions()[i]->IsGapMoves());
}
CHECK(R.code->instructions().size() == 2);

int indexes[] = {0, 2, -1};
for (int i = 0; indexes[i] >= 0; i++) {
int index = indexes[i];

UnallocatedOperand* op1 = R.NewUnallocated(index + 6);
UnallocatedOperand* op2 = R.NewUnallocated(index + 12);

R.code->AddGapMove(index, op1, op2);
GapInstruction* gap = R.code->GapAt(index);
ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
int index = 0;
for (auto instr : R.code->instructions()) {
UnallocatedOperand* op1 = R.NewUnallocated(index++);
UnallocatedOperand* op2 = R.NewUnallocated(index++);
instr->GetOrCreateParallelMove(TestInstr::START, R.zone())
->AddMove(op1, op2, R.zone());
ParallelMove* move = instr->GetParallelMove(TestInstr::START);
CHECK(move);
const ZoneList<MoveOperands>* move_operands = move->move_operands();
CHECK_EQ(1, move_operands->length());
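The rewritten test above also shows the idiomatic way to attach moves now that gaps live on the instruction itself. A condensed usage sketch, with zone and operand setup omitted and 'zone', 'from' and 'to' assumed to exist already:

// Illustrative usage only; operands and zone are assumed to be set up.
Instruction* instr = Instruction::New(zone, kArchNop);
instr->GetOrCreateParallelMove(Instruction::START, zone)
    ->AddMove(from, to, zone);
ParallelMove* start_moves = instr->GetParallelMove(Instruction::START);
CHECK(start_moves != nullptr);
CHECK_EQ(1, start_moves->move_operands()->length());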
@ -58,16 +58,16 @@ class TestCode : public HandleAndZoneScope {
void RedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 2;
sequence_.AddGapMove(index, RegisterOperand::New(13, main_zone()),
RegisterOperand::New(13, main_zone()));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, RegisterOperand::New(13, main_zone()),
RegisterOperand::New(13, main_zone()));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 2;
sequence_.AddGapMove(index, ImmediateOperand::New(11, main_zone()),
RegisterOperand::New(11, main_zone()));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ImmediateOperand::New(11, main_zone()),
RegisterOperand::New(11, main_zone()));
}
void Other() {
Start();
@ -96,6 +96,11 @@ class TestCode : public HandleAndZoneScope {
CHECK(current_ == NULL);
Start(true);
}
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to) {
sequence_.InstructionAt(index)
->GetOrCreateParallelMove(Instruction::START, main_zone())
->AddMove(from, to, main_zone());
}
};

@ -11,13 +11,11 @@ namespace compiler {

class MoveOptimizerTest : public InstructionSequenceTest {
public:
GapInstruction* LastGap() {
return GapInstruction::cast(*(sequence()->instructions().rbegin() + 1));
}
Instruction* LastInstruction() { return sequence()->instructions().back(); }

void AddMove(GapInstruction* gap, TestOperand from, TestOperand to,
GapInstruction::InnerPosition pos = GapInstruction::START) {
auto parallel_move = gap->GetOrCreateParallelMove(pos, zone());
void AddMove(Instruction* instr, TestOperand from, TestOperand to,
Instruction::GapPosition pos = Instruction::START) {
auto parallel_move = instr->GetOrCreateParallelMove(pos, zone());
parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
}

@ -86,16 +84,16 @@ class MoveOptimizerTest : public InstructionSequenceTest {

TEST_F(MoveOptimizerTest, RemovesRedundant) {
StartBlock();
EmitNop();
AddMove(LastGap(), Reg(0), Reg(1));
EmitNop();
AddMove(LastGap(), Reg(1), Reg(0));
auto first_instr = EmitNop();
AddMove(first_instr, Reg(0), Reg(1));
auto last_instr = EmitNop();
AddMove(last_instr, Reg(1), Reg(0));
EndBlock(Last());

Optimize();

auto gap = LastGap();
auto move = gap->parallel_moves()[0];
CHECK_EQ(0, NonRedundantSize(first_instr->parallel_moves()[0]));
auto move = last_instr->parallel_moves()[0];
CHECK_EQ(1, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
}
@ -105,7 +103,7 @@ TEST_F(MoveOptimizerTest, SplitsConstants) {
StartBlock();
EndBlock(Last());

auto gap = LastGap();
auto gap = LastInstruction();
AddMove(gap, Const(1), Slot(0));
AddMove(gap, Const(1), Slot(1));
AddMove(gap, Const(1), Reg(0));
@ -131,18 +129,18 @@ TEST_F(MoveOptimizerTest, SimpleMerge) {

StartBlock();
EndBlock(Jump(2));
AddMove(LastGap(), Reg(0), Reg(1));
AddMove(LastInstruction(), Reg(0), Reg(1));

StartBlock();
EndBlock(Jump(1));
AddMove(LastGap(), Reg(0), Reg(1));
AddMove(LastInstruction(), Reg(0), Reg(1));

StartBlock();
EndBlock(Last());

Optimize();

auto move = LastGap()->parallel_moves()[0];
auto move = LastInstruction()->parallel_moves()[0];
CHECK_EQ(1, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
}
@ -154,13 +152,13 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {

StartBlock();
EndBlock(Jump(2));
auto gap_0 = LastGap();
auto gap_0 = LastInstruction();
AddMove(gap_0, Reg(0), Reg(1));
AddMove(LastGap(), Reg(1), Reg(0));
AddMove(LastInstruction(), Reg(1), Reg(0));

StartBlock();
EndBlock(Jump(1));
auto gap_1 = LastGap();
auto gap_1 = LastInstruction();
AddMove(gap_1, Reg(0), Reg(1));
AddMove(gap_1, Reg(1), Reg(0));

@ -169,9 +167,9 @@ TEST_F(MoveOptimizerTest, SimpleMergeCycle) {

Optimize();

CHECK(gap_0->IsRedundant());
CHECK(gap_1->IsRedundant());
auto move = LastGap()->parallel_moves()[0];
CHECK(gap_0->AreMovesRedundant());
CHECK(gap_1->AreMovesRedundant());
auto move = LastInstruction()->parallel_moves()[0];
CHECK_EQ(2, NonRedundantSize(move));
CHECK(Contains(move, Reg(0), Reg(1)));
CHECK(Contains(move, Reg(1), Reg(0)));
|
Loading…
Reference in New Issue
Block a user