[turbofan] cleanup ParallelMove

- make ParallelMove into a ZoneVector, removing an annoying level of indirection
- make MoveOperands hold InstructionOperands instead of pointers, so there's no more operand aliasing for moves
- opens up the possibility of storing MachineType in allocated operands

R=bmeurer@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1081373002

Cr-Commit-Position: refs/heads/master@{#27842}
dcarney authored 2015-04-15 05:36:36 -07:00, committed by Commit bot
commit 00aec79079 (parent 6198bbc56d)
13 changed files with 356 additions and 375 deletions
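At its core, the CL trades pointer aliasing for value semantics. A minimal before/after sketch with simplified stand-in types (std::vector standing in for ZoneVector; not the literal V8 declarations):

```cpp
#include <cstdint>
#include <vector>

// Stand-in for v8::internal::compiler::InstructionOperand: kind and payload
// packed into a single word, compared by value.
struct InstructionOperand {
  uint64_t value_ = 0;
  bool operator==(const InstructionOperand& o) const { return value_ == o.value_; }
};

// Before: a move aliased its operands through pointers, so rewriting one
// operand could silently change another move that pointed at the same object.
struct OldMoveOperands {
  InstructionOperand* source_;
  InstructionOperand* destination_;
};

// After: operands are copied into the move by value, so updating one move can
// never affect another. ParallelMove also becomes the container itself
// (previously a ZoneList hidden behind ParallelMove::move_operands()).
struct NewMoveOperands {
  InstructionOperand source_;
  InstructionOperand destination_;
};
struct ParallelMove : public std::vector<NewMoveOperands*> {};
```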

src/compiler/gap-resolver.cc View File

@ -12,47 +12,30 @@ namespace v8 {
namespace internal {
namespace compiler {
typedef ZoneList<MoveOperands>::iterator op_iterator;
namespace {
#ifdef ENABLE_SLOW_DCHECKS
struct InstructionOperandComparator {
bool operator()(const InstructionOperand* x,
const InstructionOperand* y) const {
return *x < *y;
}
};
#endif
// No operand should be the destination for more than one move.
static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
#ifdef ENABLE_SLOW_DCHECKS
std::set<InstructionOperand*, InstructionOperandComparator> seen;
for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
SLOW_DCHECK(seen.find(i->destination()) == seen.end());
seen.insert(i->destination());
}
#endif
inline bool Blocks(MoveOperands* move, InstructionOperand destination) {
return move->Blocks(destination);
}
void GapResolver::Resolve(ParallelMove* parallel_move) const {
ZoneList<MoveOperands>* moves = parallel_move->move_operands();
// TODO(svenpanne) Use the member version of remove_if when we use real lists.
op_iterator end =
std::remove_if(moves->begin(), moves->end(),
std::mem_fun_ref(&MoveOperands::IsRedundant));
moves->Rewind(static_cast<int>(end - moves->begin()));
inline bool IsRedundant(MoveOperands* move) { return move->IsRedundant(); }
VerifyMovesAreInjective(moves);
} // namespace
for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
if (!move->IsEliminated()) PerformMove(moves, &*move);
void GapResolver::Resolve(ParallelMove* moves) const {
// Clear redundant moves.
auto it =
std::remove_if(moves->begin(), moves->end(), std::ptr_fun(IsRedundant));
moves->erase(it, moves->end());
for (auto move : *moves) {
if (!move->IsEliminated()) PerformMove(moves, move);
}
}
void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
MoveOperands* move) const {
void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// Each call to this function performs a move and deletes it from the move
// graph. We first recursively perform any move blocking this one. We mark a
// move as "pending" on entry to PerformMove in order to detect cycles in the
@ -63,14 +46,14 @@ void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
// Clear this move's destination to indicate a pending move. The actual
// destination is saved on the side.
DCHECK_NOT_NULL(move->source()); // Or else it will look eliminated.
InstructionOperand* destination = move->destination();
move->set_destination(NULL);
DCHECK(!move->source().IsInvalid()); // Or else it will look eliminated.
InstructionOperand destination = move->destination();
move->SetPending();
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, non-pending move with a source the same as this one's
// destination blocks this one, so recursively perform all such moves.
for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
for (auto other : *moves) {
if (other->Blocks(destination) && !other->IsPending()) {
// Though PerformMove can change any source operand in the move graph,
// this call cannot create a blocking move via a swap (this loop does not
@ -91,8 +74,8 @@ void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
// This move's source may have changed due to swaps to resolve cycles, and so
// it may now be the last move in the cycle. If so, remove it.
InstructionOperand* source = move->source();
if (source->Equals(destination)) {
InstructionOperand source = move->source();
if (source == destination) {
move->Eliminate();
return;
}
@ -100,28 +83,27 @@ void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
// The move may be blocked on at most one pending move, in which case we
// have a cycle. Search for such a blocking move and perform a swap to
// resolve it.
op_iterator blocker = std::find_if(
moves->begin(), moves->end(),
std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
auto blocker = std::find_if(moves->begin(), moves->end(),
std::bind2nd(std::ptr_fun(&Blocks), destination));
if (blocker == moves->end()) {
// The easy case: This move is not blocked.
assembler_->AssembleMove(source, destination);
assembler_->AssembleMove(&source, &destination);
move->Eliminate();
return;
}
DCHECK(blocker->IsPending());
DCHECK((*blocker)->IsPending());
// Ensure source is a register or both are stack slots, to limit swap cases.
if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
if (source.IsStackSlot() || source.IsDoubleStackSlot()) {
std::swap(source, destination);
}
assembler_->AssembleSwap(source, destination);
assembler_->AssembleSwap(&source, &destination);
move->Eliminate();
// Any unperformed (including pending) move with a source of either this
// move's source or destination needs to have its source changed to
// reflect the state of affairs after the swap.
for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
for (auto other : *moves) {
if (other->Blocks(source)) {
other->set_source(destination);
} else if (other->Blocks(destination)) {
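For orientation, the resolver's algorithm is unchanged by this CL, only its types: a depth-first walk that marks the current move pending by invalidating its destination, then breaks any remaining cycle with one swap. A self-contained toy version over integer registers (my simplification; -1 plays the role of the invalid operand):

```cpp
#include <cassert>
#include <utility>
#include <vector>

// A move copies regs[src] into regs[dst]. dst == -1 means pending,
// src == -1 means eliminated (mirroring MoveOperands' invalid-operand states).
struct Move { int src, dst; };

void PerformMove(std::vector<Move>& moves, Move& move, std::vector<int>& regs) {
  const int dst = move.dst;
  move.dst = -1;  // SetPending(): clear destination, keep source
  // Recursively perform every non-pending move that reads our destination.
  for (Move& other : moves)
    if (other.src == dst && other.dst != -1) PerformMove(moves, other, regs);
  if (move.src == dst) { move.src = -1; return; }  // cycle collapsed: self-move
  // A move still reading dst must be pending: we are in a cycle.
  Move* blocker = nullptr;
  for (Move& other : moves)
    if (other.src == dst) blocker = &other;
  if (blocker == nullptr) {
    regs[dst] = regs[move.src];  // AssembleMove: the easy, unblocked case
    move.src = -1;               // Eliminate()
    return;
  }
  std::swap(regs[move.src], regs[dst]);  // AssembleSwap breaks the cycle
  const int src = move.src;
  move.src = -1;                         // Eliminate()
  // The swap relocated two values; retarget the sources of remaining moves.
  for (Move& other : moves) {
    if (other.src == src) other.src = dst;
    else if (other.src == dst) other.src = src;
  }
}

int main() {
  std::vector<int> regs = {10, 20};
  std::vector<Move> moves = {{0, 1}, {1, 0}};  // a two-element cycle
  for (Move& m : moves)
    if (m.src != -1 && m.dst != -1 && m.src != m.dst) PerformMove(moves, m, regs);
  assert(regs[0] == 20 && regs[1] == 10);  // swapped, as a parallel move requires
}
```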

src/compiler/gap-resolver.h View File

@ -34,7 +34,7 @@ class GapResolver FINAL {
private:
// Perform the given move, possibly requiring other moves to satisfy
// dependencies.
void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
void PerformMove(ParallelMove* moves, MoveOperands* move) const;
// Assembler used to emit moves and save registers.
Assembler* const assembler_;

src/compiler/instruction.cc View File

@ -13,7 +13,7 @@ namespace compiler {
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& printable) {
const InstructionOperand& op = *printable.op_;
const InstructionOperand& op = printable.op_;
const RegisterConfiguration* conf = printable.register_configuration_;
switch (op.kind()) {
case InstructionOperand::UNALLOCATED: {
@ -82,9 +82,8 @@ std::ostream& operator<<(std::ostream& os,
const MoveOperands& mo = *printable.move_operands_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
mo.destination()};
os << printable_op;
if (!mo.source()->Equals(mo.destination())) {
if (mo.source() != mo.destination()) {
printable_op.op_ = mo.source();
os << " = " << printable_op;
}
@ -93,24 +92,23 @@ std::ostream& operator<<(std::ostream& os,
bool ParallelMove::IsRedundant() const {
for (int i = 0; i < move_operands_.length(); ++i) {
if (!move_operands_[i].IsRedundant()) return false;
for (auto move : *this) {
if (!move->IsRedundant()) return false;
}
return true;
}
MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
auto move_ops = move_operands();
MoveOperands* replacement = nullptr;
MoveOperands* to_eliminate = nullptr;
for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
for (auto curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination()->Equals(move->source())) {
if (curr->destination() == move->source()) {
DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
} else if (curr->destination()->Equals(move->destination())) {
} else if (curr->destination() == move->destination()) {
DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
@ -175,8 +173,7 @@ std::ostream& operator<<(std::ostream& os,
const PrintableParallelMove& printable) {
const ParallelMove& pm = *printable.parallel_move_;
bool first = true;
for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
move != pm.move_operands()->end(); ++move) {
for (auto move : pm) {
if (move->IsEliminated()) continue;
if (!first) os << " ";
first = false;
@ -199,14 +196,14 @@ std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm) {
os << "{";
bool first = true;
PrintableInstructionOperand poi = {RegisterConfiguration::ArchDefault(),
nullptr};
InstructionOperand()};
for (auto& op : pm.reference_operands_) {
if (!first) {
os << ";";
} else {
first = false;
}
poi.op_ = &op;
poi.op_ = op;
os << poi;
}
return os << "}";
@ -295,7 +292,7 @@ std::ostream& operator<<(std::ostream& os,
const PrintableInstruction& printable) {
const Instruction& instr = *printable.instr_;
PrintableInstructionOperand printable_op = {printable.register_configuration_,
NULL};
InstructionOperand()};
os << "gap ";
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
@ -312,7 +309,7 @@ std::ostream& operator<<(std::ostream& os,
if (instr.OutputCount() > 1) os << "(";
for (size_t i = 0; i < instr.OutputCount(); i++) {
if (i > 0) os << ", ";
printable_op.op_ = instr.OutputAt(i);
printable_op.op_ = *instr.OutputAt(i);
os << printable_op;
}
@ -330,7 +327,7 @@ std::ostream& operator<<(std::ostream& os,
}
if (instr.InputCount() > 0) {
for (size_t i = 0; i < instr.InputCount(); i++) {
printable_op.op_ = instr.InputAt(i);
printable_op.op_ = *instr.InputAt(i);
os << " " << printable_op;
}
}
@ -368,14 +365,12 @@ PhiInstruction::PhiInstruction(Zone* zone, int virtual_register,
size_t input_count)
: virtual_register_(virtual_register),
output_(UnallocatedOperand(UnallocatedOperand::NONE, virtual_register)),
operands_(input_count, zone),
inputs_(input_count, zone) {}
operands_(input_count, InstructionOperand::kInvalidVirtualRegister,
zone) {}
void PhiInstruction::SetInput(size_t offset, int virtual_register) {
DCHECK(inputs_[offset].IsInvalid());
auto input = UnallocatedOperand(UnallocatedOperand::ANY, virtual_register);
inputs_[offset] = input;
DCHECK_EQ(InstructionOperand::kInvalidVirtualRegister, operands_[offset]);
operands_[offset] = virtual_register;
}
@ -726,11 +721,10 @@ std::ostream& operator<<(std::ostream& os,
for (auto phi : block->phis()) {
PrintableInstructionOperand printable_op = {
printable.register_configuration_, &phi->output()};
printable.register_configuration_, phi->output()};
os << " phi: " << printable_op << " =";
for (auto input : phi->inputs()) {
printable_op.op_ = &input;
os << " " << printable_op;
for (auto input : phi->operands()) {
os << " v" << input;
}
os << "\n";
}

src/compiler/instruction.h View File

@ -50,10 +50,6 @@ class InstructionOperand {
inline bool IsStackSlot() const;
inline bool IsDoubleStackSlot() const;
bool Equals(const InstructionOperand* other) const {
return value_ == other->value_;
}
// Useful for map/set keys.
bool operator<(const InstructionOperand& op) const {
return value_ < op.value_;
@ -63,6 +59,10 @@ class InstructionOperand {
return value_ == op.value_;
}
bool operator!=(const InstructionOperand& op) const {
return value_ != op.value_;
}
template <typename SubKindOperand>
static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
void* buffer = zone->New(sizeof(op));
@ -84,7 +84,7 @@ class InstructionOperand {
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
const InstructionOperand* op_;
InstructionOperand op_;
};
std::ostream& operator<<(std::ostream& os,
@ -167,12 +167,6 @@ class UnallocatedOperand : public InstructionOperand {
value_ |= LifetimeField::encode(lifetime);
}
UnallocatedOperand* Copy(Zone* zone) { return New(zone, *this); }
UnallocatedOperand* CopyUnconstrained(Zone* zone) {
return New(zone, UnallocatedOperand(ANY, virtual_register()));
}
// Predicates for the operand policy.
bool HasAnyPolicy() const {
return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
@ -435,43 +429,55 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
#undef ALLOCATED_OPERAND_CLASS
class MoveOperands FINAL {
class MoveOperands FINAL : public ZoneObject {
public:
MoveOperands(InstructionOperand* source, InstructionOperand* destination)
: source_(source), destination_(destination) {}
MoveOperands(const InstructionOperand& source,
const InstructionOperand& destination)
: source_(source), destination_(destination) {
DCHECK(!source.IsInvalid() && !destination.IsInvalid());
}
InstructionOperand* source() const { return source_; }
void set_source(InstructionOperand* operand) { source_ = operand; }
const InstructionOperand& source() const { return source_; }
InstructionOperand& source() { return source_; }
void set_source(const InstructionOperand& operand) { source_ = operand; }
InstructionOperand* destination() const { return destination_; }
void set_destination(InstructionOperand* operand) { destination_ = operand; }
const InstructionOperand& destination() const { return destination_; }
InstructionOperand& destination() { return destination_; }
void set_destination(const InstructionOperand& operand) {
destination_ = operand;
}
// The gap resolver marks moves as "in-progress" by clearing the
// destination (but not the source).
bool IsPending() const { return destination_ == NULL && source_ != NULL; }
bool IsPending() const {
return destination_.IsInvalid() && !source_.IsInvalid();
}
void SetPending() { destination_ = InstructionOperand(); }
// True if this is a move into the given destination operand.
bool Blocks(InstructionOperand* operand) const {
return !IsEliminated() && source()->Equals(operand);
bool Blocks(const InstructionOperand& operand) const {
return !IsEliminated() && source() == operand;
}
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
DCHECK_IMPLIES(destination_ != nullptr, !destination_->IsConstant());
return IsEliminated() || source_->Equals(destination_);
DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
return IsEliminated() || source_ == destination_;
}
// We clear both operands to indicate a move that's been eliminated.
void Eliminate() { source_ = destination_ = NULL; }
void Eliminate() { source_ = destination_ = InstructionOperand(); }
bool IsEliminated() const {
DCHECK(source_ != NULL || destination_ == NULL);
return source_ == NULL;
DCHECK_IMPLIES(source_.IsInvalid(), destination_.IsInvalid());
return source_.IsInvalid();
}
private:
InstructionOperand* source_;
InstructionOperand* destination_;
InstructionOperand source_;
InstructionOperand destination_;
DISALLOW_COPY_AND_ASSIGN(MoveOperands);
};
@ -484,29 +490,29 @@ struct PrintableMoveOperands {
std::ostream& operator<<(std::ostream& os, const PrintableMoveOperands& mo);
class ParallelMove FINAL : public ZoneObject {
class ParallelMove FINAL : public ZoneVector<MoveOperands*>, public ZoneObject {
public:
explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
explicit ParallelMove(Zone* zone) : ZoneVector<MoveOperands*>(zone) {
reserve(4);
}
void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
move_operands_.Add(MoveOperands(from, to), zone);
MoveOperands* AddMove(const InstructionOperand& from,
const InstructionOperand& to) {
auto zone = get_allocator().zone();
auto move = new (zone) MoveOperands(from, to);
push_back(move);
return move;
}
bool IsRedundant() const;
ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
const ZoneList<MoveOperands>* move_operands() const {
return &move_operands_;
}
// Prepare this ParallelMove to insert move as if it happened in a subsequent
// ParallelMove. move->source() may be changed. The MoveOperand returned
// must be Eliminated and, as it points directly into move_operands_, it must
// be Eliminated before any further mutation.
// must be Eliminated.
MoveOperands* PrepareInsertAfter(MoveOperands* move) const;
private:
ZoneList<MoveOperands> move_operands_;
DISALLOW_COPY_AND_ASSIGN(ParallelMove);
};
@ -856,18 +862,15 @@ class PhiInstruction FINAL : public ZoneObject {
int virtual_register() const { return virtual_register_; }
const IntVector& operands() const { return operands_; }
// TODO(dcarney): this has no real business being here, since it's internal to
// the register allocator, but putting it here was convenient.
const InstructionOperand& output() const { return output_; }
InstructionOperand& output() { return output_; }
const Inputs& inputs() const { return inputs_; }
Inputs& inputs() { return inputs_; }
private:
// TODO(dcarney): some of these fields are only for verification, move them to
// verifier.
const int virtual_register_;
InstructionOperand output_;
IntVector operands_;
Inputs inputs_;
};
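The pointer states NULL/NULL that previously encoded "pending" and "eliminated" become default-constructed (invalid) operands. A compilable toy illustrating the three states and their invariants (assumed simplification: an int payload where -1 means invalid):

```cpp
#include <cassert>

struct InstructionOperand {
  int value_;
  InstructionOperand() : value_(-1) {}            // default-constructed: INVALID
  explicit InstructionOperand(int v) : value_(v) {}
  bool IsInvalid() const { return value_ == -1; }
  bool operator==(const InstructionOperand& o) const { return value_ == o.value_; }
};

struct MoveOperands {
  InstructionOperand source_, destination_;
  MoveOperands(const InstructionOperand& s, const InstructionOperand& d)
      : source_(s), destination_(d) {}
  // Pending: destination cleared, source kept (set by the gap resolver).
  bool IsPending() const { return destination_.IsInvalid() && !source_.IsInvalid(); }
  void SetPending() { destination_ = InstructionOperand(); }
  // Eliminated: both cleared. Redundant: eliminated, or a move onto itself.
  bool IsEliminated() const { return source_.IsInvalid(); }
  void Eliminate() { source_ = destination_ = InstructionOperand(); }
  bool IsRedundant() const { return IsEliminated() || source_ == destination_; }
};

int main() {
  MoveOperands m(InstructionOperand(1), InstructionOperand(2));
  assert(!m.IsPending() && !m.IsRedundant());
  m.SetPending();                 // what PerformMove does on entry
  assert(m.IsPending() && !m.IsEliminated());
  m.Eliminate();                  // what happens once the move is emitted
  assert(m.IsEliminated() && m.IsRedundant());
}
```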

src/compiler/move-optimizer.cc View File

@ -21,16 +21,13 @@ bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
int FindFirstNonEmptySlot(Instruction* instr) {
int i = Instruction::FIRST_GAP_POSITION;
for (; i <= Instruction::LAST_GAP_POSITION; i++) {
auto move = instr->parallel_moves()[i];
if (move == nullptr) continue;
auto move_ops = move->move_operands();
auto op = move_ops->begin();
for (; op != move_ops->end(); ++op) {
if (!op->IsRedundant()) break;
op->Eliminate();
auto moves = instr->parallel_moves()[i];
if (moves == nullptr) continue;
for (auto move : *moves) {
if (!move->IsRedundant()) return i;
move->Eliminate();
}
if (op != move_ops->end()) break; // Found non-redundant move.
move_ops->Rewind(0); // Clear this redundant move.
moves->clear(); // Clear this redundant move.
}
return i;
}
@ -63,29 +60,27 @@ void MoveOptimizer::Run() {
void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
ParallelMove* right) {
DCHECK(eliminated->empty());
auto move_ops = right->move_operands();
if (!left->move_operands()->is_empty()) {
if (!left->empty()) {
// Modify the right moves in place and collect moves that will be killed by
// merging the two gaps.
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
auto to_eliminate = left->PrepareInsertAfter(op);
for (auto move : *right) {
if (move->IsRedundant()) continue;
auto to_eliminate = left->PrepareInsertAfter(move);
if (to_eliminate != nullptr) eliminated->push_back(to_eliminate);
}
// Eliminate dead moves. Must happen before insertion of new moves as the
// contents of eliminated are pointers into a list.
// Eliminate dead moves.
for (auto to_eliminate : *eliminated) {
to_eliminate->Eliminate();
}
eliminated->clear();
}
// Add all possibly modified moves from right side.
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
left->move_operands()->Add(*op, code_zone());
for (auto move : *right) {
if (move->IsRedundant()) continue;
left->push_back(move);
}
// Nuke right.
move_ops->Rewind(0);
right->clear();
}
@ -159,14 +154,13 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
auto pred = code()->InstructionBlockAt(pred_index);
auto instr = LastInstruction(pred);
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
instr->parallel_moves()[0]->empty()) {
return;
}
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
auto src = *op->source();
auto dst = *op->destination();
for (auto move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
auto src = move->source();
auto dst = move->destination();
MoveKey key = {src, dst};
auto res = move_map.insert(std::make_pair(key, 1));
if (!res.second) {
@ -188,30 +182,29 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
DCHECK(instr != nullptr);
bool gap_initialized = true;
if (instr->parallel_moves()[0] == nullptr ||
instr->parallel_moves()[0]->move_operands()->is_empty()) {
instr->parallel_moves()[0]->empty()) {
to_finalize_.push_back(instr);
} else {
// Will compress after insertion.
gap_initialized = false;
std::swap(instr->parallel_moves()[0], instr->parallel_moves()[1]);
}
auto move = instr->GetOrCreateParallelMove(
auto moves = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(0), code_zone());
// Delete relevant entries in predecessors and move everything to block.
bool first_iteration = true;
for (auto pred_index : block->predecessors()) {
auto pred = code()->InstructionBlockAt(pred_index);
auto move_ops = LastInstruction(pred)->parallel_moves()[0]->move_operands();
for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
if (op->IsRedundant()) continue;
MoveKey key = {*op->source(), *op->destination()};
for (auto move : *LastInstruction(pred)->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
MoveKey key = {move->source(), move->destination()};
auto it = move_map.find(key);
USE(it);
DCHECK(it != move_map.end());
if (first_iteration) {
move->AddMove(op->source(), op->destination(), code_zone());
moves->AddMove(move->source(), move->destination());
}
op->Eliminate();
move->Eliminate();
}
first_iteration = false;
}
@ -223,70 +216,55 @@ void MoveOptimizer::OptimizeMerge(InstructionBlock* block) {
}
namespace {
bool IsSlot(const InstructionOperand& op) {
return op.IsStackSlot() || op.IsDoubleStackSlot();
}
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
if (a->source() != b->source()) return a->source() < b->source();
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
return a->destination() < b->destination();
}
} // namespace
// Split multiple loads of the same constant or stack slot off into the second
// slot and keep remaining moves in the first slot.
void MoveOptimizer::FinalizeMoves(Instruction* instr) {
auto loads = temp_vector_0();
DCHECK(loads.empty());
auto new_moves = temp_vector_1();
DCHECK(new_moves.empty());
auto move_ops = instr->parallel_moves()[0]->move_operands();
for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
if (move->IsRedundant()) {
move->Eliminate();
continue;
}
if (!(move->source()->IsConstant() || move->source()->IsStackSlot() ||
move->source()->IsDoubleStackSlot()))
continue;
// Search for existing move to this slot.
MoveOperands* found = nullptr;
for (auto load : loads) {
if (load->source()->Equals(move->source())) {
found = load;
break;
}
}
// Not found so insert.
if (found == nullptr) {
// Find all the loads.
for (auto move : *instr->parallel_moves()[0]) {
if (move->IsRedundant()) continue;
if (move->source().IsConstant() || IsSlot(move->source())) {
loads.push_back(move);
// Replace source with copy for later use.
auto dest = move->destination();
move->set_destination(InstructionOperand::New(code_zone(), *dest));
}
}
if (loads.empty()) return;
// Group the loads by source, moving the preferred destination to the
// beginning of the group.
std::sort(loads.begin(), loads.end(), LoadCompare);
MoveOperands* group_begin = nullptr;
for (auto load : loads) {
// New group.
if (group_begin == nullptr || load->source() != group_begin->source()) {
group_begin = load;
continue;
}
if ((found->destination()->IsStackSlot() ||
found->destination()->IsDoubleStackSlot()) &&
!(move->destination()->IsStackSlot() ||
move->destination()->IsDoubleStackSlot())) {
// Found a better source for this load. Smash it in place to affect other
// loads that have already been split.
auto next_dest =
InstructionOperand::New(code_zone(), *found->destination());
auto dest = move->destination();
InstructionOperand::ReplaceWith(found->destination(), dest);
move->set_destination(next_dest);
}
// move from load destination.
move->set_source(found->destination());
new_moves.push_back(move);
// Nothing to be gained from splitting here.
if (IsSlot(group_begin->destination())) continue;
// Insert new move into slot 1.
auto slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
slot_1->AddMove(group_begin->destination(), load->destination());
load->Eliminate();
}
loads.clear();
if (new_moves.empty()) return;
// Insert all new moves into slot 1.
auto slot_1 = instr->GetOrCreateParallelMove(
static_cast<Instruction::GapPosition>(1), code_zone());
DCHECK(slot_1->move_operands()->is_empty());
slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
static_cast<int>(new_moves.size()),
code_zone());
auto it = slot_1->move_operands()->begin();
for (auto new_move : new_moves) {
std::swap(*new_move, *it);
++it;
}
DCHECK_EQ(it, slot_1->move_operands()->end());
new_moves.clear();
}
} // namespace compiler
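The rewritten FinalizeMoves replaces the old quadratic "search for an existing move to this slot" with sort-and-group: loads are sorted by source (register destinations first within a group), the group leader keeps the real load, and every other member becomes a cheap move from the leader's destination, inserted into the second gap position. A standalone sketch of that grouping (toy operand types of my own, not V8's):

```cpp
#include <algorithm>
#include <vector>

enum Kind { kConstant, kSlot, kRegister };
struct Op { Kind kind; int index; };
inline bool SameOp(const Op& a, const Op& b) {
  return a.kind == b.kind && a.index == b.index;
}
struct Move { Op src, dst; bool eliminated; };

// gap0 holds the block's moves; splittable duplicates are emitted into gap1.
void SplitLoads(std::vector<Move>& gap0, std::vector<Move>& gap1) {
  std::vector<Move*> loads;
  for (Move& m : gap0)
    if (!m.eliminated && (m.src.kind == kConstant || m.src.kind == kSlot))
      loads.push_back(&m);
  std::sort(loads.begin(), loads.end(), [](const Move* a, const Move* b) {
    if (a->src.kind != b->src.kind) return a->src.kind < b->src.kind;
    if (a->src.index != b->src.index) return a->src.index < b->src.index;
    // Within a group, register destinations come first: the group leader
    // should be the cheapest operand to copy from later.
    return a->dst.kind == kRegister && b->dst.kind != kRegister;
  });
  Move* leader = nullptr;
  for (Move* load : loads) {
    if (leader == nullptr || !SameOp(load->src, leader->src)) {
      leader = load;  // first load of a new group keeps the real load
      continue;
    }
    if (leader->dst.kind != kRegister) continue;  // nothing gained from a slot
    gap1.push_back(Move{leader->dst, load->dst, false});  // reuse loaded value
    load->eliminated = true;
  }
}
```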

src/compiler/register-allocator-verifier.cc View File

@ -10,12 +10,14 @@ namespace v8 {
namespace internal {
namespace compiler {
static size_t OperandCount(const Instruction* instr) {
namespace {
size_t OperandCount(const Instruction* instr) {
return instr->InputCount() + instr->OutputCount() + instr->TempCount();
}
static void VerifyEmptyGaps(const Instruction* instr) {
void VerifyEmptyGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
@ -25,6 +27,24 @@ static void VerifyEmptyGaps(const Instruction* instr) {
}
void VerifyAllocatedGaps(const Instruction* instr) {
for (int i = Instruction::FIRST_GAP_POSITION;
i <= Instruction::LAST_GAP_POSITION; i++) {
Instruction::GapPosition inner_pos =
static_cast<Instruction::GapPosition>(i);
auto moves = instr->GetParallelMove(inner_pos);
if (moves == nullptr) continue;
for (auto move : *moves) {
if (move->IsRedundant()) continue;
CHECK(move->source().IsAllocated() || move->source().IsConstant());
CHECK(move->destination().IsAllocated());
}
}
}
} // namespace
void RegisterAllocatorVerifier::VerifyInput(
const OperandConstraint& constraint) {
CHECK_NE(kSameAsFirst, constraint.type_);
@ -94,6 +114,8 @@ void RegisterAllocatorVerifier::VerifyAssignment() {
auto instr_it = sequence()->begin();
for (const auto& instr_constraint : *constraints()) {
const auto* instr = instr_constraint.instruction_;
// All gaps should be totally allocated at this point.
VerifyAllocatedGaps(instr);
const size_t operand_count = instr_constraint.operand_constaints_size_;
const auto* op_constraints = instr_constraint.operand_constraints_;
CHECK_EQ(instr, *instr_it);
@ -298,7 +320,7 @@ class OperandMap : public ZoneObject {
this->erase(it++);
if (it == this->end()) return;
}
if (it->first->Equals(o.first)) {
if (*it->first == *o.first) {
++it;
if (it == this->end()) return;
} else {
@ -312,23 +334,22 @@ class OperandMap : public ZoneObject {
Map& map() { return map_; }
void RunParallelMoves(Zone* zone, const ParallelMove* move) {
void RunParallelMoves(Zone* zone, const ParallelMove* moves) {
// Compute outgoing mappings.
Map to_insert(zone);
auto moves = move->move_operands();
for (auto i = moves->begin(); i != moves->end(); ++i) {
if (i->IsEliminated()) continue;
auto cur = map().find(i->source());
for (auto move : *moves) {
if (move->IsEliminated()) continue;
auto cur = map().find(&move->source());
CHECK(cur != map().end());
auto res =
to_insert.insert(std::make_pair(i->destination(), cur->second));
to_insert.insert(std::make_pair(&move->destination(), cur->second));
// Ensure injectivity of moves.
CHECK(res.second);
}
// Drop current mappings.
for (auto i = moves->begin(); i != moves->end(); ++i) {
if (i->IsEliminated()) continue;
auto cur = map().find(i->destination());
for (auto move : *moves) {
if (move->IsEliminated()) continue;
auto cur = map().find(&move->destination());
if (cur != map().end()) map().erase(cur);
}
// Insert new values.
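The verifier leans on the map insert result to enforce that a ParallelMove writes each destination at most once, the same injectivity property the old slow-DCHECK in gap-resolver.cc used to verify. The pattern in isolation (a hypothetical toy keyed on ints):

```cpp
#include <cassert>
#include <map>

int main() {
  std::map<int, int> to_insert;  // destination -> value it will receive
  assert(to_insert.insert(std::make_pair(7, 41)).second);   // first write: ok
  assert(!to_insert.insert(std::make_pair(7, 42)).second);  // second write: rejected
}
```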

src/compiler/register-allocator.cc View File

@ -140,8 +140,6 @@ LiveRange::LiveRange(int id, Zone* zone)
void LiveRange::set_assigned_register(int reg) {
DCHECK(!HasRegisterAssigned() && !IsSpilled());
assigned_register_ = reg;
// TODO(dcarney): stop aliasing hint operands.
ConvertUsesToOperand(GetAssignedOperand(), nullptr);
}
@ -175,19 +173,17 @@ void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
// constraint move from a fixed output register to a slot.
if (might_be_duplicated) {
bool found = false;
auto move_ops = move->move_operands();
for (auto move_op = move_ops->begin(); move_op != move_ops->end();
++move_op) {
for (auto move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source()->Equals(to_spill->operand) &&
move_op->destination()->Equals(op)) {
if (move_op->source() == *to_spill->operand &&
move_op->destination() == *op) {
found = true;
break;
}
}
if (found) continue;
}
move->AddMove(to_spill->operand, op, zone);
move->AddMove(*to_spill->operand, *op);
}
}
@ -803,12 +799,13 @@ void RegisterAllocator::Use(LifetimePosition block_start,
}
void RegisterAllocator::AddGapMove(int index, Instruction::GapPosition position,
InstructionOperand* from,
InstructionOperand* to) {
MoveOperands* RegisterAllocator::AddGapMove(int index,
Instruction::GapPosition position,
const InstructionOperand& from,
const InstructionOperand& to) {
auto instr = code()->InstructionAt(index);
auto move = instr->GetOrCreateParallelMove(position, code_zone());
move->AddMove(from, to, code_zone());
auto moves = instr->GetOrCreateParallelMove(position, code_zone());
return moves->AddMove(from, to);
}
@ -960,6 +957,7 @@ void RegisterAllocator::CommitAssignment() {
}
auto assigned = range->GetAssignedOperand();
range->ConvertUsesToOperand(assigned, spill_operand);
if (range->is_phi()) AssignPhiInput(range, assigned);
if (!range->IsChild() && spill_operand != nullptr) {
range->CommitSpillsAtDefinition(code(), spill_operand,
range->has_slot_use());
@ -981,8 +979,8 @@ bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
auto lookup = phi_map_.find(range->id());
DCHECK(lookup != phi_map_.end());
auto phi = lookup->second.phi;
auto block = lookup->second.block;
auto phi = lookup->second->phi;
auto block = lookup->second->block;
// Count the number of spilled operands.
size_t spilled_count = 0;
LiveRange* first_op = nullptr;
@ -1098,10 +1096,8 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
int gap_index = successor->first_instruction_index();
// Create an unconstrained operand for the same virtual register
// and insert a gap move from the fixed output to the operand.
UnallocatedOperand* output_copy =
UnallocatedOperand(UnallocatedOperand::ANY, output_vreg)
.Copy(code_zone());
AddGapMove(gap_index, Instruction::START, output, output_copy);
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
AddGapMove(gap_index, Instruction::START, *output, output_copy);
}
}
@ -1139,8 +1135,9 @@ void RegisterAllocator::MeetConstraintsAfter(int instr_index) {
auto range = LiveRangeFor(first_output->virtual_register());
bool assigned = false;
if (first_output->HasFixedPolicy()) {
auto output_copy = first_output->CopyUnconstrained(code_zone());
bool is_tagged = HasTaggedValue(first_output->virtual_register());
int output_vreg = first_output->virtual_register();
UnallocatedOperand output_copy(UnallocatedOperand::ANY, output_vreg);
bool is_tagged = HasTaggedValue(output_vreg);
AllocateFixed(first_output, instr_index, is_tagged);
// This value is produced on the stack; we never need to spill it.
@ -1151,7 +1148,7 @@ void RegisterAllocator::MeetConstraintsAfter(int instr_index) {
range->SetSpillStartIndex(instr_index + 1);
assigned = true;
}
AddGapMove(instr_index + 1, Instruction::START, first_output,
AddGapMove(instr_index + 1, Instruction::START, *first_output,
output_copy);
}
// Make sure we add a gap move for spilling (if we have not done
@ -1172,10 +1169,11 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
if (input->IsImmediate()) continue; // Ignore immediates.
auto cur_input = UnallocatedOperand::cast(input);
if (cur_input->HasFixedPolicy()) {
auto input_copy = cur_input->CopyUnconstrained(code_zone());
bool is_tagged = HasTaggedValue(cur_input->virtual_register());
int input_vreg = cur_input->virtual_register();
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
bool is_tagged = HasTaggedValue(input_vreg);
AllocateFixed(cur_input, instr_index, is_tagged);
AddGapMove(instr_index, Instruction::END, input_copy, cur_input);
AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
}
}
// Handle "output same as input" for second instruction.
@ -1189,12 +1187,12 @@ void RegisterAllocator::MeetConstraintsBefore(int instr_index) {
UnallocatedOperand::cast(second->InputAt(0));
int output_vreg = second_output->virtual_register();
int input_vreg = cur_input->virtual_register();
auto input_copy = cur_input->CopyUnconstrained(code_zone());
UnallocatedOperand input_copy(UnallocatedOperand::ANY, input_vreg);
cur_input->set_virtual_register(second_output->virtual_register());
AddGapMove(instr_index, Instruction::END, input_copy, cur_input);
AddGapMove(instr_index, Instruction::END, input_copy, *cur_input);
if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
if (second->HasReferenceMap()) {
second->reference_map()->RecordReference(*input_copy);
second->reference_map()->RecordReference(input_copy);
}
} else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
// The input is assumed to immediately have a tagged representation,
@ -1331,13 +1329,12 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
} else {
curr_position = curr_position.Start();
}
auto move_ops = move->move_operands();
for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
auto from = cur->source();
auto to = cur->destination();
auto hint = to;
if (to->IsUnallocated()) {
int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
for (auto cur : *move) {
auto& from = cur->source();
auto& to = cur->destination();
auto hint = &to;
if (to.IsUnallocated()) {
int to_vreg = UnallocatedOperand::cast(to).virtual_register();
auto to_range = LiveRangeFor(to_vreg);
if (to_range->is_phi()) {
if (to_range->is_non_loop_phi()) {
@ -1345,7 +1342,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
}
} else {
if (live->Contains(to_vreg)) {
Define(curr_position, to, from);
Define(curr_position, &to, &from);
live->Remove(to_vreg);
} else {
cur->Eliminate();
@ -1353,11 +1350,11 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
}
}
} else {
Define(curr_position, to, from);
Define(curr_position, &to, &from);
}
Use(block_start_position, curr_position, from, hint);
if (from->IsUnallocated()) {
live->Add(UnallocatedOperand::cast(from)->virtual_register());
Use(block_start_position, curr_position, &from, hint);
if (from.IsUnallocated()) {
live->Add(UnallocatedOperand::cast(from).virtual_register());
}
}
}
@ -1368,16 +1365,18 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
for (auto phi : block->phis()) {
int phi_vreg = phi->virtual_register();
auto res =
phi_map_.insert(std::make_pair(phi_vreg, PhiMapValue(phi, block)));
auto map_value = new (local_zone()) PhiMapValue(phi, block, local_zone());
auto res = phi_map_.insert(std::make_pair(phi_vreg, map_value));
DCHECK(res.second);
USE(res);
auto& output = phi->output();
for (size_t i = 0; i < phi->operands().size(); ++i) {
InstructionBlock* cur_block =
code()->InstructionBlockAt(block->predecessors()[i]);
AddGapMove(cur_block->last_instruction_index(), Instruction::END,
&phi->inputs()[i], &output);
UnallocatedOperand input(UnallocatedOperand::ANY, phi->operands()[i]);
auto move = AddGapMove(cur_block->last_instruction_index(),
Instruction::END, input, output);
map_value->incoming_moves.push_back(move);
DCHECK(!InstructionAt(cur_block->last_instruction_index())
->HasReferenceMap());
}
@ -1392,6 +1391,17 @@ void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
}
void RegisterAllocator::AssignPhiInput(LiveRange* range,
const InstructionOperand& assignment) {
DCHECK(range->is_phi());
auto it = phi_map_.find(range->id());
DCHECK(it != phi_map_.end());
for (auto move : it->second->incoming_moves) {
move->set_destination(assignment);
}
}
void RegisterAllocator::MeetRegisterConstraints() {
for (auto block : code()->instruction_blocks()) {
MeetRegisterConstraints(block);
@ -1415,7 +1425,7 @@ const InstructionBlock* RegisterAllocator::GetInstructionBlock(
void RegisterAllocator::ConnectRanges() {
ZoneMap<std::pair<ParallelMove*, InstructionOperand*>, InstructionOperand*>
ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand>
delayed_insertion_map(local_zone());
for (auto first_range : live_ranges()) {
if (first_range == nullptr || first_range->IsChild()) continue;
@ -1430,9 +1440,9 @@ void RegisterAllocator::ConnectRanges() {
!CanEagerlyResolveControlFlow(GetInstructionBlock(pos))) {
continue;
}
auto prev = first_range->GetAssignedOperand();
auto cur = second_range->GetAssignedOperand();
if (prev == cur) continue;
auto prev_operand = first_range->GetAssignedOperand();
auto cur_operand = second_range->GetAssignedOperand();
if (prev_operand == cur_operand) continue;
bool delay_insertion = false;
Instruction::GapPosition gap_pos;
int gap_index = pos.ToInstructionIndex();
@ -1448,10 +1458,8 @@ void RegisterAllocator::ConnectRanges() {
}
auto move = code()->InstructionAt(gap_index)->GetOrCreateParallelMove(
gap_pos, code_zone());
auto prev_operand = InstructionOperand::New(code_zone(), prev);
auto cur_operand = InstructionOperand::New(code_zone(), cur);
if (!delay_insertion) {
move->AddMove(prev_operand, cur_operand, code_zone());
move->AddMove(prev_operand, cur_operand);
} else {
delayed_insertion_map.insert(
std::make_pair(std::make_pair(move, prev_operand), cur_operand));
@ -1460,31 +1468,31 @@ void RegisterAllocator::ConnectRanges() {
}
if (delayed_insertion_map.empty()) return;
// Insert all the moves which should occur after the stored move.
ZoneVector<MoveOperands> to_insert(local_zone());
ZoneVector<MoveOperands*> to_insert(local_zone());
ZoneVector<MoveOperands*> to_eliminate(local_zone());
to_insert.reserve(4);
to_eliminate.reserve(4);
auto move = delayed_insertion_map.begin()->first.first;
auto moves = delayed_insertion_map.begin()->first.first;
for (auto it = delayed_insertion_map.begin();; ++it) {
bool done = it == delayed_insertion_map.end();
if (done || it->first.first != move) {
if (done || it->first.first != moves) {
// Commit the MoveOperands for current ParallelMove.
for (auto move_ops : to_eliminate) {
move_ops->Eliminate();
for (auto move : to_eliminate) {
move->Eliminate();
}
for (auto move_ops : to_insert) {
move->AddMove(move_ops.source(), move_ops.destination(), code_zone());
for (auto move : to_insert) {
moves->push_back(move);
}
if (done) break;
// Reset state.
to_eliminate.clear();
to_insert.clear();
move = it->first.first;
moves = it->first.first;
}
// Gather all MoveOperands for a single ParallelMove.
MoveOperands move_ops(it->first.second, it->second);
auto eliminate = move->PrepareInsertAfter(&move_ops);
to_insert.push_back(move_ops);
auto move = new (code_zone()) MoveOperands(it->first.second, it->second);
auto eliminate = moves->PrepareInsertAfter(move);
to_insert.push_back(move);
if (eliminate != nullptr) to_eliminate.push_back(eliminate);
}
}
@ -1650,9 +1658,7 @@ void RegisterAllocator::ResolveControlFlow() {
auto pred_op = result.pred_cover_->GetAssignedOperand();
auto cur_op = result.cur_cover_->GetAssignedOperand();
if (pred_op == cur_op) continue;
auto pred_ptr = InstructionOperand::New(code_zone(), pred_op);
auto cur_ptr = InstructionOperand::New(code_zone(), cur_op);
ResolveControlFlow(block, cur_ptr, pred_block, pred_ptr);
ResolveControlFlow(block, cur_op, pred_block, pred_op);
}
iterator.Advance();
}
@ -1661,10 +1667,10 @@ void RegisterAllocator::ResolveControlFlow() {
void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
InstructionOperand* cur_op,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
InstructionOperand* pred_op) {
DCHECK(!pred_op->Equals(cur_op));
const InstructionOperand& pred_op) {
DCHECK(pred_op != cur_op);
int gap_index;
Instruction::GapPosition position;
if (block->PredecessorCount() == 1) {
@ -1700,23 +1706,20 @@ void RegisterAllocator::BuildLiveRanges() {
int phi_vreg = phi->virtual_register();
live->Remove(phi_vreg);
InstructionOperand* hint = nullptr;
InstructionOperand* phi_operand = nullptr;
auto instr = GetLastInstruction(
code()->InstructionBlockAt(block->predecessors()[0]));
auto move = instr->GetParallelMove(Instruction::END);
for (int j = 0; j < move->move_operands()->length(); ++j) {
auto to = move->move_operands()->at(j).destination();
if (to->IsUnallocated() &&
UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
hint = move->move_operands()->at(j).source();
phi_operand = to;
for (auto move : *instr->GetParallelMove(Instruction::END)) {
auto& to = move->destination();
if (to.IsUnallocated() &&
UnallocatedOperand::cast(to).virtual_register() == phi_vreg) {
hint = &move->source();
break;
}
}
DCHECK(hint != nullptr);
auto block_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
Define(block_start, phi_operand, hint);
Define(block_start, &phi->output(), hint);
}
// Now live is live_in for this block except not including values live
@ -2507,6 +2510,10 @@ void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
assigned_registers_->Add(reg);
}
range->set_assigned_register(reg);
auto assignment = range->GetAssignedOperand();
// TODO(dcarney): stop aliasing hint operands.
range->ConvertUsesToOperand(assignment, nullptr);
if (range->is_phi()) AssignPhiInput(range, assignment);
}
} // namespace compiler
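One behavioral subtlety: phi inputs used to be aliased UnallocatedOperands, so committing an assignment rewrote them implicitly; now each gap move feeding a phi is recorded in PhiMapValue::incoming_moves and retargeted explicitly by AssignPhiInput. A schematic sketch (simplified types; only incoming_moves and AssignPhiInput are taken from the CL):

```cpp
#include <vector>

struct InstructionOperand { int value = -1; };
struct MoveOperands {
  InstructionOperand source, destination;
  void set_destination(const InstructionOperand& op) { destination = op; }
};

// Filled in during ResolvePhis: one entry per gap move into the phi.
struct PhiMapValue {
  std::vector<MoveOperands*> incoming_moves;
};

// Called once the phi's live range has its final operand: every predecessor's
// move is pointed at the assignment, replacing the old pointer-aliasing trick.
void AssignPhiInput(PhiMapValue& phi, const InstructionOperand& assignment) {
  for (MoveOperands* move : phi.incoming_moves) move->set_destination(assignment);
}
```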

src/compiler/register-allocator.h View File

@ -509,8 +509,9 @@ class RegisterAllocator FINAL : public ZoneObject {
InstructionOperand* hint);
void Use(LifetimePosition block_start, LifetimePosition position,
InstructionOperand* operand, InstructionOperand* hint);
void AddGapMove(int index, Instruction::GapPosition position,
InstructionOperand* from, InstructionOperand* to);
MoveOperands* AddGapMove(int index, Instruction::GapPosition position,
const InstructionOperand& from,
const InstructionOperand& to);
// Helper methods for updating the live range lists.
void AddToActive(LiveRange* range);
@ -574,9 +575,9 @@ class RegisterAllocator FINAL : public ZoneObject {
// Helper methods for resolving control flow.
void ResolveControlFlow(const InstructionBlock* block,
InstructionOperand* cur_op,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
InstructionOperand* pred_op);
const InstructionOperand& pred_op);
void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
@ -595,6 +596,7 @@ class RegisterAllocator FINAL : public ZoneObject {
const char* RegisterName(int allocation_index);
Instruction* InstructionAt(int index) { return code()->InstructionAt(index); }
void AssignPhiInput(LiveRange* range, const InstructionOperand& assignment);
Frame* frame() const { return frame_; }
const char* debug_name() const { return debug_name_; }
@ -613,13 +615,17 @@ class RegisterAllocator FINAL : public ZoneObject {
}
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
struct PhiMapValue {
PhiMapValue(PhiInstruction* phi, const InstructionBlock* block)
: phi(phi), block(block) {}
class PhiMapValue : public ZoneObject {
public:
PhiMapValue(PhiInstruction* phi, const InstructionBlock* block, Zone* zone)
: phi(phi), block(block), incoming_moves(zone) {
incoming_moves.reserve(phi->operands().size());
}
PhiInstruction* const phi;
const InstructionBlock* const block;
ZoneVector<MoveOperands*> incoming_moves;
};
typedef ZoneMap<int, PhiMapValue> PhiMap;
typedef ZoneMap<int, PhiMapValue*> PhiMap;
Zone* const local_zone_;
Frame* const frame_;

src/zone-allocator.h View File

@ -57,6 +57,8 @@ class zone_allocator {
return zone_ != other.zone_;
}
Zone* zone() { return zone_; }
private:
zone_allocator();
Zone* zone_;

test/cctest/compiler/test-gap-resolver.cc View File

@ -14,12 +14,10 @@ using namespace v8::internal::compiler;
// that the actual values don't really matter, all we care about is equality.
class InterpreterState {
public:
typedef std::vector<MoveOperands> Moves;
void ExecuteInParallel(Moves moves) {
void ExecuteInParallel(const ParallelMove* moves) {
InterpreterState copy(*this);
for (Moves::iterator it = moves.begin(); it != moves.end(); ++it) {
if (!it->IsRedundant()) write(it->destination(), copy.read(it->source()));
for (const auto m : *moves) {
if (!m->IsRedundant()) write(m->destination(), copy.read(m->source()));
}
}
@ -57,12 +55,12 @@ class InterpreterState {
typedef Key Value;
typedef std::map<Key, Value> OperandMap;
Value read(const InstructionOperand* op) const {
Value read(const InstructionOperand& op) const {
OperandMap::const_iterator it = values_.find(KeyFor(op));
return (it == values_.end()) ? ValueFor(op) : it->second;
}
void write(const InstructionOperand* op, Value v) {
void write(const InstructionOperand& op, Value v) {
if (v == ValueFor(op)) {
values_.erase(KeyFor(op));
} else {
@ -70,22 +68,22 @@ class InterpreterState {
}
}
static Key KeyFor(const InstructionOperand* op) {
bool is_constant = op->IsConstant();
static Key KeyFor(const InstructionOperand& op) {
bool is_constant = op.IsConstant();
AllocatedOperand::AllocatedKind kind;
int index;
if (!is_constant) {
index = AllocatedOperand::cast(op)->index();
kind = AllocatedOperand::cast(op)->allocated_kind();
index = AllocatedOperand::cast(op).index();
kind = AllocatedOperand::cast(op).allocated_kind();
} else {
index = ConstantOperand::cast(op)->virtual_register();
index = ConstantOperand::cast(op).virtual_register();
kind = AllocatedOperand::REGISTER;
}
Key key = {is_constant, kind, index};
return key;
}
static Value ValueFor(const InstructionOperand* op) { return KeyFor(op); }
static Value ValueFor(const InstructionOperand& op) { return KeyFor(op); }
static InstructionOperand FromKey(Key key) {
if (key.is_constant) {
@ -101,7 +99,7 @@ class InterpreterState {
if (it != is.values_.begin()) os << " ";
InstructionOperand source = FromKey(it->first);
InstructionOperand destination = FromKey(it->second);
MoveOperands mo(&source, &destination);
MoveOperands mo(source, destination);
PrintableMoveOperands pmo = {RegisterConfiguration::ArchDefault(), &mo};
os << pmo;
}
@ -115,30 +113,31 @@ class InterpreterState {
// An abstract interpreter for moves, swaps and parallel moves.
class MoveInterpreter : public GapResolver::Assembler {
public:
explicit MoveInterpreter(Zone* zone) : zone_(zone) {}
virtual void AssembleMove(InstructionOperand* source,
InstructionOperand* destination) OVERRIDE {
InterpreterState::Moves moves;
moves.push_back(MoveOperands(source, destination));
ParallelMove* moves = new (zone_) ParallelMove(zone_);
moves->AddMove(*source, *destination);
state_.ExecuteInParallel(moves);
}
virtual void AssembleSwap(InstructionOperand* source,
InstructionOperand* destination) OVERRIDE {
InterpreterState::Moves moves;
moves.push_back(MoveOperands(source, destination));
moves.push_back(MoveOperands(destination, source));
ParallelMove* moves = new (zone_) ParallelMove(zone_);
moves->AddMove(*source, *destination);
moves->AddMove(*destination, *source);
state_.ExecuteInParallel(moves);
}
void AssembleParallelMove(const ParallelMove* pm) {
InterpreterState::Moves moves(pm->move_operands()->begin(),
pm->move_operands()->end());
void AssembleParallelMove(const ParallelMove* moves) {
state_.ExecuteInParallel(moves);
}
InterpreterState state() const { return state_; }
private:
Zone* const zone_;
InterpreterState state_;
};
@ -149,11 +148,11 @@ class ParallelMoveCreator : public HandleAndZoneScope {
ParallelMove* Create(int size) {
ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
std::set<InstructionOperand*, InstructionOperandComparator> seen;
std::set<InstructionOperand> seen;
for (int i = 0; i < size; ++i) {
MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
parallel_move->AddMove(mo.source(), mo.destination(), main_zone());
parallel_move->AddMove(mo.source(), mo.destination());
seen.insert(mo.destination());
}
}
@ -161,30 +160,23 @@ class ParallelMoveCreator : public HandleAndZoneScope {
}
private:
struct InstructionOperandComparator {
bool operator()(const InstructionOperand* x,
const InstructionOperand* y) const {
return *x < *y;
}
};
InstructionOperand* CreateRandomOperand(bool is_source) {
InstructionOperand CreateRandomOperand(bool is_source) {
int index = rng_->NextInt(6);
// destination can't be Constant.
switch (rng_->NextInt(is_source ? 5 : 4)) {
case 0:
return StackSlotOperand::New(main_zone(), index);
return StackSlotOperand(index);
case 1:
return DoubleStackSlotOperand::New(main_zone(), index);
return DoubleStackSlotOperand(index);
case 2:
return RegisterOperand::New(main_zone(), index);
return RegisterOperand(index);
case 3:
return DoubleRegisterOperand::New(main_zone(), index);
return DoubleRegisterOperand(index);
case 4:
return ConstantOperand::New(main_zone(), index);
return ConstantOperand(index);
}
UNREACHABLE();
return NULL;
return InstructionOperand();
}
private:
@ -199,10 +191,10 @@ TEST(FuzzResolver) {
ParallelMove* pm = pmc.Create(size);
// Note: The gap resolver modifies the ParallelMove, so interpret first.
MoveInterpreter mi1;
MoveInterpreter mi1(pmc.main_zone());
mi1.AssembleParallelMove(pm);
MoveInterpreter mi2;
MoveInterpreter mi2(pmc.main_zone());
GapResolver resolver(&mi2);
resolver.Resolve(pm);
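The fuzzer's correctness criterion, stated compactly: interpreting the ParallelMove atomically (all sources read from the pre-move state) must leave the machine in the same state as executing the resolver's sequential output. A tiny runnable statement of that invariant over integer registers (my reduction of the test's idea):

```cpp
#include <cassert>
#include <utility>
#include <vector>

// Parallel semantics: every source reads the snapshot taken before any write.
std::vector<int> ExecuteInParallel(const std::vector<std::pair<int, int>>& moves,
                                   std::vector<int> regs) {
  const std::vector<int> before = regs;  // like the test's `InterpreterState copy`
  for (const auto& m : moves) regs[m.second] = before[m.first];
  return regs;
}

int main() {
  std::vector<std::pair<int, int>> cycle = {{0, 1}, {1, 0}};
  std::vector<int> regs = {10, 20};
  const std::vector<int> expected = ExecuteInParallel(cycle, regs);
  std::swap(regs[0], regs[1]);  // what a gap resolver would emit for this cycle
  assert(regs == expected);     // sequential output matches parallel semantics
}
```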

test/cctest/compiler/test-instruction.cc View File

@ -83,8 +83,8 @@ class InstructionTester : public HandleAndZoneScope {
return code->AddInstruction(instr);
}
UnallocatedOperand* NewUnallocated(int vreg) {
return UnallocatedOperand(UnallocatedOperand::ANY, vreg).Copy(zone());
UnallocatedOperand Unallocated(int vreg) {
return UnallocatedOperand(UnallocatedOperand::ANY, vreg);
}
RpoNumber RpoFor(BasicBlock* block) {
@ -255,17 +255,16 @@ TEST(InstructionAddGapMove) {
int index = 0;
for (auto instr : R.code->instructions()) {
UnallocatedOperand* op1 = R.NewUnallocated(index++);
UnallocatedOperand* op2 = R.NewUnallocated(index++);
UnallocatedOperand op1 = R.Unallocated(index++);
UnallocatedOperand op2 = R.Unallocated(index++);
instr->GetOrCreateParallelMove(TestInstr::START, R.zone())
->AddMove(op1, op2, R.zone());
->AddMove(op1, op2);
ParallelMove* move = instr->GetParallelMove(TestInstr::START);
CHECK(move);
const ZoneList<MoveOperands>* move_operands = move->move_operands();
CHECK_EQ(1, move_operands->length());
MoveOperands* cur = &move_operands->at(0);
CHECK_EQ(op1, cur->source());
CHECK_EQ(op2, cur->destination());
CHECK_EQ(1u, move->size());
MoveOperands* cur = move->at(0);
CHECK(op1 == cur->source());
CHECK(op2 == cur->destination());
}
}
@ -309,15 +308,15 @@ TEST(InstructionOperands) {
CHECK(k == m->TempCount());
for (size_t z = 0; z < i; z++) {
CHECK(outputs[z].Equals(m->OutputAt(z)));
CHECK(outputs[z] == *m->OutputAt(z));
}
for (size_t z = 0; z < j; z++) {
CHECK(inputs[z].Equals(m->InputAt(z)));
CHECK(inputs[z] == *m->InputAt(z));
}
for (size_t z = 0; z < k; z++) {
CHECK(temps[z].Equals(m->TempAt(z)));
CHECK(temps[z] == *m->TempAt(z));
}
}
}

test/cctest/compiler/test-jump-threading.cc View File

@ -59,15 +59,13 @@ class TestCode : public HandleAndZoneScope {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, RegisterOperand::New(main_zone(), 13),
RegisterOperand::New(main_zone(), 13));
AddGapMove(index, RegisterOperand(13), RegisterOperand(13));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ConstantOperand::New(main_zone(), 11),
RegisterOperand::New(main_zone(), 11));
AddGapMove(index, ConstantOperand(11), RegisterOperand(11));
}
void Other() {
Start();
@ -95,10 +93,11 @@ class TestCode : public HandleAndZoneScope {
CHECK(current_ == NULL);
Start(true);
}
void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to) {
void AddGapMove(int index, const InstructionOperand& from,
const InstructionOperand& to) {
sequence_.InstructionAt(index)
->GetOrCreateParallelMove(Instruction::START, main_zone())
->AddMove(from, to, main_zone());
->AddMove(from, to);
}
};

test/unittests/compiler/move-optimizer-unittest.cc View File

@ -16,26 +16,24 @@ class MoveOptimizerTest : public InstructionSequenceTest {
void AddMove(Instruction* instr, TestOperand from, TestOperand to,
Instruction::GapPosition pos = Instruction::START) {
auto parallel_move = instr->GetOrCreateParallelMove(pos, zone());
parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to));
}
int NonRedundantSize(ParallelMove* move) {
int NonRedundantSize(ParallelMove* moves) {
int i = 0;
auto ops = move->move_operands();
for (auto op = ops->begin(); op != ops->end(); ++op) {
if (op->IsRedundant()) continue;
for (auto move : *moves) {
if (move->IsRedundant()) continue;
i++;
}
return i;
}
bool Contains(ParallelMove* move, TestOperand from_op, TestOperand to_op) {
bool Contains(ParallelMove* moves, TestOperand from_op, TestOperand to_op) {
auto from = ConvertMoveArg(from_op);
auto to = ConvertMoveArg(to_op);
auto ops = move->move_operands();
for (auto op = ops->begin(); op != ops->end(); ++op) {
if (op->IsRedundant()) continue;
if (op->source()->Equals(from) && op->destination()->Equals(to)) {
for (auto move : *moves) {
if (move->IsRedundant()) continue;
if (move->source() == from && move->destination() == to) {
return true;
}
}
@ -62,22 +60,22 @@ class MoveOptimizerTest : public InstructionSequenceTest {
}
private:
InstructionOperand* ConvertMoveArg(TestOperand op) {
InstructionOperand ConvertMoveArg(TestOperand op) {
CHECK_EQ(kNoValue, op.vreg_.value_);
CHECK_NE(kNoValue, op.value_);
switch (op.type_) {
case kConstant:
return ConstantOperand::New(zone(), op.value_);
return ConstantOperand(op.value_);
case kFixedSlot:
return StackSlotOperand::New(zone(), op.value_);
return StackSlotOperand(op.value_);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
return RegisterOperand::New(zone(), op.value_);
return RegisterOperand(op.value_);
default:
break;
}
CHECK(false);
return nullptr;
return InstructionOperand();
}
};