Reland: [turbofan] add MachineType to AllocatedOperand

- allows optimization of the emitted gap-move code, since the representation of the value in the register is known
- necessary preparation for vector register allocation
- prepares for slot sharing among values of the same byte width (sketched below)
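
A standalone sketch of the idea (simplified stand-ins, not the real V8 classes): the operand itself now carries its representation, so later phases no longer need the old doubles_/references_ side tables, and spill-slot byte widths follow from the operand alone.

#include <cassert>
#include <cstdint>

// Simplified model of the new AllocatedOperand: kind, machine type and index
// packed into one 64-bit value (see the instruction.h hunk below for the
// real BitField64 layout).
enum Rep : uint64_t { kW32, kW64, kF32, kF64, kTagged };

struct AllocatedOp {
  uint64_t bits;
  AllocatedOp(uint64_t kind, Rep rep, uint64_t index)
      : bits(kind | (static_cast<uint64_t>(rep) << 5) | (index << 35)) {}
  Rep rep() const { return static_cast<Rep>((bits >> 5) & 0xFFFF); }
};

// Byte width follows from the representation (cf. GetByteWidth in the
// register-allocator.cc hunk); a 32-bit target is assumed here, where tagged
// pointers are 4 bytes wide.
int ByteWidth(Rep r) { return (r == kW32 || r == kTagged) ? 4 : 8; }

int main() {
  AllocatedOp op(/*kind=*/4, kF64, /*index=*/2);
  assert(op.rep() == kF64);
  assert(ByteWidth(op.rep()) == 8);
  return 0;
}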

TBR=jarin@chromium.org
BUG=

Review URL: https://codereview.chromium.org/1111323003

Cr-Commit-Position: refs/heads/master@{#28140}
dcarney 2015-04-29 12:36:16 -07:00 committed by Commit bot
parent 7eccb18148
commit 81345f1a2c
16 changed files with 528 additions and 353 deletions

View File

@@ -20,13 +20,11 @@ class Frame : public ZoneObject {
Frame()
: register_save_area_size_(0),
spill_slot_count_(0),
double_spill_slot_count_(0),
osr_stack_slot_count_(0),
allocated_registers_(NULL),
allocated_double_registers_(NULL) {}
inline int GetSpillSlotCount() { return spill_slot_count_; }
inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
void SetAllocatedRegisters(BitVector* regs) {
DCHECK(allocated_registers_ == NULL);
@@ -57,15 +55,13 @@ class Frame : public ZoneObject {
int GetOsrStackSlotCount() { return osr_stack_slot_count_; }
int AllocateSpillSlot(bool is_double) {
// If 32-bit, skip one if the new slot is a double.
if (is_double) {
if (kDoubleSize > kPointerSize) {
DCHECK(kDoubleSize == kPointerSize * 2);
spill_slot_count_++;
spill_slot_count_ |= 1;
}
double_spill_slot_count_++;
int AllocateSpillSlot(int width) {
DCHECK(width == 4 || width == 8);
// Skip one slot if necessary.
if (width > kPointerSize) {
DCHECK(width == kPointerSize * 2);
spill_slot_count_++;
spill_slot_count_ |= 1;
}
return spill_slot_count_++;
}
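// Worked trace of the skip logic above on a 32-bit target (kPointerSize == 4):
// with spill_slot_count_ == 2 and width == 8, the first increment reserves the
// extra word (2 -> 3) and `|= 1` is a no-op, so the call returns 3 and leaves
// the count at 4. Starting from spill_slot_count_ == 3 instead, the steps give
// 3 -> 4 -> 5, the call returns 5 and leaves the count at 6, with one word of
// alignment padding in between; forcing the returned index odd is what keeps
// the double-width value 8-byte aligned.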
@@ -78,7 +74,6 @@ class Frame : public ZoneObject {
private:
int register_save_area_size_;
int spill_slot_count_;
int double_spill_slot_count_;
int osr_stack_slot_count_;
BitVector* allocated_registers_;
BitVector* allocated_double_registers_;

View File

@@ -75,7 +75,7 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) const {
// This move's source may have changed due to swaps to resolve cycles and so
// it may now be the last move in the cycle. If so remove it.
InstructionOperand source = move->source();
if (source == destination) {
if (source.EqualsModuloType(destination)) {
move->Eliminate();
return;
}

View File

@@ -137,7 +137,7 @@ class OperandGenerator {
UnallocatedOperand op = UnallocatedOperand(
UnallocatedOperand::MUST_HAVE_REGISTER,
UnallocatedOperand::USED_AT_START, sequence()->NextVirtualRegister());
sequence()->MarkAsDouble(op.virtual_register());
sequence()->MarkAsRepresentation(kRepFloat64, op.virtual_register());
return op;
}

View File

@@ -241,70 +241,17 @@ void InstructionSelector::MarkAsUsed(Node* node) {
}
bool InstructionSelector::IsDouble(const Node* node) const {
DCHECK_NOT_NULL(node);
int const virtual_register = virtual_registers_[node->id()];
if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
return false;
}
return sequence()->IsDouble(virtual_register);
}
void InstructionSelector::MarkAsDouble(Node* node) {
DCHECK_NOT_NULL(node);
DCHECK(!IsReference(node));
sequence()->MarkAsDouble(GetVirtualRegister(node));
}
bool InstructionSelector::IsReference(const Node* node) const {
DCHECK_NOT_NULL(node);
int const virtual_register = virtual_registers_[node->id()];
if (virtual_register == InstructionOperand::kInvalidVirtualRegister) {
return false;
}
return sequence()->IsReference(virtual_register);
}
void InstructionSelector::MarkAsReference(Node* node) {
DCHECK_NOT_NULL(node);
DCHECK(!IsDouble(node));
sequence()->MarkAsReference(GetVirtualRegister(node));
}
void InstructionSelector::MarkAsRepresentation(MachineType rep,
const InstructionOperand& op) {
UnallocatedOperand unalloc = UnallocatedOperand::cast(op);
switch (RepresentationOf(rep)) {
case kRepFloat32:
case kRepFloat64:
sequence()->MarkAsDouble(unalloc.virtual_register());
break;
case kRepTagged:
sequence()->MarkAsReference(unalloc.virtual_register());
break;
default:
break;
}
rep = RepresentationOf(rep);
sequence()->MarkAsRepresentation(rep, unalloc.virtual_register());
}
void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
DCHECK_NOT_NULL(node);
switch (RepresentationOf(rep)) {
case kRepFloat32:
case kRepFloat64:
MarkAsDouble(node);
break;
case kRepTagged:
MarkAsReference(node);
break;
default:
break;
}
rep = RepresentationOf(rep);
sequence()->MarkAsRepresentation(rep, GetVirtualRegister(node));
}
@@ -625,9 +572,9 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kExternalConstant:
return VisitConstant(node);
case IrOpcode::kFloat32Constant:
return MarkAsDouble(node), VisitConstant(node);
return MarkAsFloat32(node), VisitConstant(node);
case IrOpcode::kFloat64Constant:
return MarkAsDouble(node), VisitConstant(node);
return MarkAsFloat64(node), VisitConstant(node);
case IrOpcode::kHeapConstant:
return MarkAsReference(node), VisitConstant(node);
case IrOpcode::kNumberConstant: {
@@ -648,125 +595,125 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kWord32And:
return VisitWord32And(node);
return MarkAsWord32(node), VisitWord32And(node);
case IrOpcode::kWord32Or:
return VisitWord32Or(node);
return MarkAsWord32(node), VisitWord32Or(node);
case IrOpcode::kWord32Xor:
return VisitWord32Xor(node);
return MarkAsWord32(node), VisitWord32Xor(node);
case IrOpcode::kWord32Shl:
return VisitWord32Shl(node);
return MarkAsWord32(node), VisitWord32Shl(node);
case IrOpcode::kWord32Shr:
return VisitWord32Shr(node);
return MarkAsWord32(node), VisitWord32Shr(node);
case IrOpcode::kWord32Sar:
return VisitWord32Sar(node);
return MarkAsWord32(node), VisitWord32Sar(node);
case IrOpcode::kWord32Ror:
return VisitWord32Ror(node);
return MarkAsWord32(node), VisitWord32Ror(node);
case IrOpcode::kWord32Equal:
return VisitWord32Equal(node);
case IrOpcode::kWord32Clz:
return VisitWord32Clz(node);
return MarkAsWord32(node), VisitWord32Clz(node);
case IrOpcode::kWord64And:
return VisitWord64And(node);
return MarkAsWord64(node), VisitWord64And(node);
case IrOpcode::kWord64Or:
return VisitWord64Or(node);
return MarkAsWord64(node), VisitWord64Or(node);
case IrOpcode::kWord64Xor:
return VisitWord64Xor(node);
return MarkAsWord64(node), VisitWord64Xor(node);
case IrOpcode::kWord64Shl:
return VisitWord64Shl(node);
return MarkAsWord64(node), VisitWord64Shl(node);
case IrOpcode::kWord64Shr:
return VisitWord64Shr(node);
return MarkAsWord64(node), VisitWord64Shr(node);
case IrOpcode::kWord64Sar:
return VisitWord64Sar(node);
return MarkAsWord64(node), VisitWord64Sar(node);
case IrOpcode::kWord64Ror:
return VisitWord64Ror(node);
return MarkAsWord64(node), VisitWord64Ror(node);
case IrOpcode::kWord64Equal:
return VisitWord64Equal(node);
case IrOpcode::kInt32Add:
return VisitInt32Add(node);
return MarkAsWord32(node), VisitInt32Add(node);
case IrOpcode::kInt32AddWithOverflow:
return VisitInt32AddWithOverflow(node);
return MarkAsWord32(node), VisitInt32AddWithOverflow(node);
case IrOpcode::kInt32Sub:
return VisitInt32Sub(node);
return MarkAsWord32(node), VisitInt32Sub(node);
case IrOpcode::kInt32SubWithOverflow:
return VisitInt32SubWithOverflow(node);
case IrOpcode::kInt32Mul:
return VisitInt32Mul(node);
return MarkAsWord32(node), VisitInt32Mul(node);
case IrOpcode::kInt32MulHigh:
return VisitInt32MulHigh(node);
case IrOpcode::kInt32Div:
return VisitInt32Div(node);
return MarkAsWord32(node), VisitInt32Div(node);
case IrOpcode::kInt32Mod:
return VisitInt32Mod(node);
return MarkAsWord32(node), VisitInt32Mod(node);
case IrOpcode::kInt32LessThan:
return VisitInt32LessThan(node);
case IrOpcode::kInt32LessThanOrEqual:
return VisitInt32LessThanOrEqual(node);
case IrOpcode::kUint32Div:
return VisitUint32Div(node);
return MarkAsWord32(node), VisitUint32Div(node);
case IrOpcode::kUint32LessThan:
return VisitUint32LessThan(node);
case IrOpcode::kUint32LessThanOrEqual:
return VisitUint32LessThanOrEqual(node);
case IrOpcode::kUint32Mod:
return VisitUint32Mod(node);
return MarkAsWord32(node), VisitUint32Mod(node);
case IrOpcode::kUint32MulHigh:
return VisitUint32MulHigh(node);
case IrOpcode::kInt64Add:
return VisitInt64Add(node);
return MarkAsWord64(node), VisitInt64Add(node);
case IrOpcode::kInt64Sub:
return VisitInt64Sub(node);
return MarkAsWord64(node), VisitInt64Sub(node);
case IrOpcode::kInt64Mul:
return VisitInt64Mul(node);
return MarkAsWord64(node), VisitInt64Mul(node);
case IrOpcode::kInt64Div:
return VisitInt64Div(node);
return MarkAsWord64(node), VisitInt64Div(node);
case IrOpcode::kInt64Mod:
return VisitInt64Mod(node);
return MarkAsWord64(node), VisitInt64Mod(node);
case IrOpcode::kInt64LessThan:
return VisitInt64LessThan(node);
case IrOpcode::kInt64LessThanOrEqual:
return VisitInt64LessThanOrEqual(node);
case IrOpcode::kUint64Div:
return VisitUint64Div(node);
return MarkAsWord64(node), VisitUint64Div(node);
case IrOpcode::kUint64LessThan:
return VisitUint64LessThan(node);
case IrOpcode::kUint64Mod:
return VisitUint64Mod(node);
return MarkAsWord64(node), VisitUint64Mod(node);
case IrOpcode::kChangeFloat32ToFloat64:
return MarkAsDouble(node), VisitChangeFloat32ToFloat64(node);
return MarkAsFloat64(node), VisitChangeFloat32ToFloat64(node);
case IrOpcode::kChangeInt32ToFloat64:
return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
return MarkAsFloat64(node), VisitChangeInt32ToFloat64(node);
case IrOpcode::kChangeUint32ToFloat64:
return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
return MarkAsFloat64(node), VisitChangeUint32ToFloat64(node);
case IrOpcode::kChangeFloat64ToInt32:
return VisitChangeFloat64ToInt32(node);
return MarkAsWord32(node), VisitChangeFloat64ToInt32(node);
case IrOpcode::kChangeFloat64ToUint32:
return VisitChangeFloat64ToUint32(node);
return MarkAsWord32(node), VisitChangeFloat64ToUint32(node);
case IrOpcode::kChangeInt32ToInt64:
return VisitChangeInt32ToInt64(node);
return MarkAsWord64(node), VisitChangeInt32ToInt64(node);
case IrOpcode::kChangeUint32ToUint64:
return VisitChangeUint32ToUint64(node);
return MarkAsWord64(node), VisitChangeUint32ToUint64(node);
case IrOpcode::kTruncateFloat64ToFloat32:
return MarkAsDouble(node), VisitTruncateFloat64ToFloat32(node);
return MarkAsFloat32(node), VisitTruncateFloat64ToFloat32(node);
case IrOpcode::kTruncateFloat64ToInt32:
return VisitTruncateFloat64ToInt32(node);
return MarkAsWord32(node), VisitTruncateFloat64ToInt32(node);
case IrOpcode::kTruncateInt64ToInt32:
return VisitTruncateInt64ToInt32(node);
return MarkAsWord32(node), VisitTruncateInt64ToInt32(node);
case IrOpcode::kFloat32Add:
return MarkAsDouble(node), VisitFloat32Add(node);
return MarkAsFloat32(node), VisitFloat32Add(node);
case IrOpcode::kFloat32Sub:
return MarkAsDouble(node), VisitFloat32Sub(node);
return MarkAsFloat32(node), VisitFloat32Sub(node);
case IrOpcode::kFloat32Mul:
return MarkAsDouble(node), VisitFloat32Mul(node);
return MarkAsFloat32(node), VisitFloat32Mul(node);
case IrOpcode::kFloat32Div:
return MarkAsDouble(node), VisitFloat32Div(node);
return MarkAsFloat32(node), VisitFloat32Div(node);
case IrOpcode::kFloat32Min:
return MarkAsDouble(node), VisitFloat32Min(node);
return MarkAsFloat32(node), VisitFloat32Min(node);
case IrOpcode::kFloat32Max:
return MarkAsDouble(node), VisitFloat32Max(node);
return MarkAsFloat32(node), VisitFloat32Max(node);
case IrOpcode::kFloat32Abs:
return MarkAsDouble(node), VisitFloat32Abs(node);
return MarkAsFloat32(node), VisitFloat32Abs(node);
case IrOpcode::kFloat32Sqrt:
return MarkAsDouble(node), VisitFloat32Sqrt(node);
return MarkAsFloat32(node), VisitFloat32Sqrt(node);
case IrOpcode::kFloat32Equal:
return VisitFloat32Equal(node);
case IrOpcode::kFloat32LessThan:
@@ -774,23 +721,23 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kFloat32LessThanOrEqual:
return VisitFloat32LessThanOrEqual(node);
case IrOpcode::kFloat64Add:
return MarkAsDouble(node), VisitFloat64Add(node);
return MarkAsFloat64(node), VisitFloat64Add(node);
case IrOpcode::kFloat64Sub:
return MarkAsDouble(node), VisitFloat64Sub(node);
return MarkAsFloat64(node), VisitFloat64Sub(node);
case IrOpcode::kFloat64Mul:
return MarkAsDouble(node), VisitFloat64Mul(node);
return MarkAsFloat64(node), VisitFloat64Mul(node);
case IrOpcode::kFloat64Div:
return MarkAsDouble(node), VisitFloat64Div(node);
return MarkAsFloat64(node), VisitFloat64Div(node);
case IrOpcode::kFloat64Mod:
return MarkAsDouble(node), VisitFloat64Mod(node);
return MarkAsFloat64(node), VisitFloat64Mod(node);
case IrOpcode::kFloat64Min:
return MarkAsDouble(node), VisitFloat64Min(node);
return MarkAsFloat64(node), VisitFloat64Min(node);
case IrOpcode::kFloat64Max:
return MarkAsDouble(node), VisitFloat64Max(node);
return MarkAsFloat64(node), VisitFloat64Max(node);
case IrOpcode::kFloat64Abs:
return MarkAsDouble(node), VisitFloat64Abs(node);
return MarkAsFloat64(node), VisitFloat64Abs(node);
case IrOpcode::kFloat64Sqrt:
return MarkAsDouble(node), VisitFloat64Sqrt(node);
return MarkAsFloat64(node), VisitFloat64Sqrt(node);
case IrOpcode::kFloat64Equal:
return VisitFloat64Equal(node);
case IrOpcode::kFloat64LessThan:
@@ -798,19 +745,19 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kFloat64LessThanOrEqual:
return VisitFloat64LessThanOrEqual(node);
case IrOpcode::kFloat64RoundDown:
return MarkAsDouble(node), VisitFloat64RoundDown(node);
return MarkAsFloat64(node), VisitFloat64RoundDown(node);
case IrOpcode::kFloat64RoundTruncate:
return MarkAsDouble(node), VisitFloat64RoundTruncate(node);
return MarkAsFloat64(node), VisitFloat64RoundTruncate(node);
case IrOpcode::kFloat64RoundTiesAway:
return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
return MarkAsFloat64(node), VisitFloat64RoundTiesAway(node);
case IrOpcode::kFloat64ExtractLowWord32:
return VisitFloat64ExtractLowWord32(node);
return MarkAsWord32(node), VisitFloat64ExtractLowWord32(node);
case IrOpcode::kFloat64ExtractHighWord32:
return VisitFloat64ExtractHighWord32(node);
return MarkAsWord32(node), VisitFloat64ExtractHighWord32(node);
case IrOpcode::kFloat64InsertLowWord32:
return MarkAsDouble(node), VisitFloat64InsertLowWord32(node);
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsDouble(node), VisitFloat64InsertHighWord32(node);
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
case IrOpcode::kLoadStackPointer:
return VisitLoadStackPointer(node);
case IrOpcode::kCheckedLoad: {

View File

@@ -146,21 +146,14 @@ class InstructionSelector final {
// will need to generate code for it.
void MarkAsUsed(Node* node);
// Checks if {node} is marked as double.
bool IsDouble(const Node* node) const;
// Inform the register allocator of a double result.
void MarkAsDouble(Node* node);
// Checks if {node} is marked as reference.
bool IsReference(const Node* node) const;
// Inform the register allocator of a reference result.
void MarkAsReference(Node* node);
// Inform the register allocator of the representation of the value produced
// by {node}.
void MarkAsRepresentation(MachineType rep, Node* node);
void MarkAsWord32(Node* node) { MarkAsRepresentation(kRepWord32, node); }
void MarkAsWord64(Node* node) { MarkAsRepresentation(kRepWord64, node); }
void MarkAsFloat32(Node* node) { MarkAsRepresentation(kRepFloat32, node); }
void MarkAsFloat64(Node* node) { MarkAsRepresentation(kRepFloat64, node); }
void MarkAsReference(Node* node) { MarkAsRepresentation(kRepTagged, node); }
// Inform the register allocator of the representation of the unallocated
// operand {op}.

View File

@@ -53,22 +53,48 @@ std::ostream& operator<<(std::ostream& os,
return os << "[immediate:" << imm.indexed_value() << "]";
}
}
case InstructionOperand::ALLOCATED:
switch (AllocatedOperand::cast(op).allocated_kind()) {
case InstructionOperand::ALLOCATED: {
auto allocated = AllocatedOperand::cast(op);
switch (allocated.allocated_kind()) {
case AllocatedOperand::STACK_SLOT:
return os << "[stack:" << StackSlotOperand::cast(op).index() << "]";
os << "[stack:" << StackSlotOperand::cast(op).index();
break;
case AllocatedOperand::DOUBLE_STACK_SLOT:
return os << "[double_stack:"
<< DoubleStackSlotOperand::cast(op).index() << "]";
os << "[double_stack:" << DoubleStackSlotOperand::cast(op).index();
break;
case AllocatedOperand::REGISTER:
return os << "["
<< conf->general_register_name(
RegisterOperand::cast(op).index()) << "|R]";
os << "["
<< conf->general_register_name(RegisterOperand::cast(op).index())
<< "|R";
break;
case AllocatedOperand::DOUBLE_REGISTER:
return os << "["
<< conf->double_register_name(
DoubleRegisterOperand::cast(op).index()) << "|R]";
os << "["
<< conf->double_register_name(
DoubleRegisterOperand::cast(op).index()) << "|R";
break;
}
switch (allocated.machine_type()) {
case kRepWord32:
os << "|w32";
break;
case kRepWord64:
os << "|w64";
break;
case kRepFloat32:
os << "|f32";
break;
case kRepFloat64:
os << "|f64";
break;
case kRepTagged:
os << "|t";
break;
default:
os << "|?";
break;
}
return os << "]";
}
case InstructionOperand::INVALID:
return os << "(x)";
}
@@ -83,7 +109,7 @@ std::ostream& operator<<(std::ostream& os,
PrintableInstructionOperand printable_op = {printable.register_configuration_,
mo.destination()};
os << printable_op;
if (mo.source() != mo.destination()) {
if (!mo.source().Equals(mo.destination())) {
printable_op.op_ = mo.source();
os << " = " << printable_op;
}
@@ -104,11 +130,11 @@ MoveOperands* ParallelMove::PrepareInsertAfter(MoveOperands* move) const {
MoveOperands* to_eliminate = nullptr;
for (auto curr : *this) {
if (curr->IsEliminated()) continue;
if (curr->destination() == move->source()) {
if (curr->destination().EqualsModuloType(move->source())) {
DCHECK(!replacement);
replacement = curr;
if (to_eliminate != nullptr) break;
} else if (curr->destination() == move->destination()) {
} else if (curr->destination().EqualsModuloType(move->destination())) {
DCHECK(!to_eliminate);
to_eliminate = curr;
if (replacement != nullptr) break;
@@ -479,8 +505,7 @@ InstructionSequence::InstructionSequence(Isolate* isolate,
instructions_(zone()),
next_virtual_register_(0),
reference_maps_(zone()),
doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
references_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
representations_(zone()),
deoptimization_entries_(zone()) {
block_starts_.reserve(instruction_blocks_->size());
}
@@ -548,23 +573,48 @@ const InstructionBlock* InstructionSequence::GetInstructionBlock(
}
bool InstructionSequence::IsReference(int virtual_register) const {
return references_.find(virtual_register) != references_.end();
static MachineType FilterRepresentation(MachineType rep) {
DCHECK_EQ(rep, RepresentationOf(rep));
switch (rep) {
case kRepBit:
case kRepWord8:
case kRepWord16:
return InstructionSequence::DefaultRepresentation();
case kRepWord32:
case kRepWord64:
case kRepFloat32:
case kRepFloat64:
case kRepTagged:
return rep;
default:
break;
}
UNREACHABLE();
return kMachNone;
}
bool InstructionSequence::IsDouble(int virtual_register) const {
return doubles_.find(virtual_register) != doubles_.end();
MachineType InstructionSequence::GetRepresentation(int virtual_register) const {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
return DefaultRepresentation();
}
return representations_[virtual_register];
}
void InstructionSequence::MarkAsReference(int virtual_register) {
references_.insert(virtual_register);
}
void InstructionSequence::MarkAsDouble(int virtual_register) {
doubles_.insert(virtual_register);
void InstructionSequence::MarkAsRepresentation(MachineType machine_type,
int virtual_register) {
DCHECK_LE(0, virtual_register);
DCHECK_LT(virtual_register, VirtualRegisterCount());
if (virtual_register >= static_cast<int>(representations_.size())) {
representations_.resize(VirtualRegisterCount(), DefaultRepresentation());
}
machine_type = FilterRepresentation(machine_type);
DCHECK_IMPLIES(representations_[virtual_register] != machine_type,
representations_[virtual_register] == DefaultRepresentation());
representations_[virtual_register] = machine_type;
}
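// The net effect of FilterRepresentation, spelled out (this restates the
// switch above): sub-word representations are widened to the pointer-sized
// default; full-width and tagged representations pass through. On an x64
// build, where DefaultRepresentation() == kRepWord64:
//   FilterRepresentation(kRepBit)     == kRepWord64
//   FilterRepresentation(kRepWord8)   == kRepWord64
//   FilterRepresentation(kRepWord16)  == kRepWord64
//   FilterRepresentation(kRepWord32)  == kRepWord32
//   FilterRepresentation(kRepFloat32) == kRepFloat32
//   FilterRepresentation(kRepTagged)  == kRepTagged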

View File

@@ -50,19 +50,6 @@ class InstructionOperand {
inline bool IsStackSlot() const;
inline bool IsDoubleStackSlot() const;
// Useful for map/set keys.
bool operator<(const InstructionOperand& op) const {
return value_ < op.value_;
}
bool operator==(const InstructionOperand& op) const {
return value_ == op.value_;
}
bool operator!=(const InstructionOperand& op) const {
return value_ != op.value_;
}
template <typename SubKindOperand>
static SubKindOperand* New(Zone* zone, const SubKindOperand& op) {
void* buffer = zone->New(sizeof(op));
@@ -74,22 +61,43 @@ class InstructionOperand {
*dest = *src;
}
bool Equals(const InstructionOperand& that) const {
return this->value_ == that.value_;
}
bool Compare(const InstructionOperand& that) const {
return this->value_ < that.value_;
}
bool EqualsModuloType(const InstructionOperand& that) const {
return this->GetValueModuloType() == that.GetValueModuloType();
}
bool CompareModuloType(const InstructionOperand& that) const {
return this->GetValueModuloType() < that.GetValueModuloType();
}
protected:
explicit InstructionOperand(Kind kind) : value_(KindField::encode(kind)) {}
inline uint64_t GetValueModuloType() const;
class KindField : public BitField64<Kind, 0, 3> {};
uint64_t value_;
};
struct PrintableInstructionOperand {
const RegisterConfiguration* register_configuration_;
InstructionOperand op_;
};
std::ostream& operator<<(std::ostream& os,
const PrintableInstructionOperand& op);
#define INSTRUCTION_OPERAND_CASTS(OperandType, OperandKind) \
\
static OperandType* cast(InstructionOperand* op) { \
@@ -346,6 +354,8 @@ class ImmediateOperand : public InstructionOperand {
class AllocatedOperand : public InstructionOperand {
public:
// TODO(dcarney): machine_type makes this now redundant. Just need to know if
// the operand is a slot or a register.
enum AllocatedKind {
STACK_SLOT,
DOUBLE_STACK_SLOT,
@@ -353,10 +363,12 @@ class AllocatedOperand : public InstructionOperand {
DOUBLE_REGISTER
};
AllocatedOperand(AllocatedKind kind, int index)
AllocatedOperand(AllocatedKind kind, MachineType machine_type, int index)
: InstructionOperand(ALLOCATED) {
DCHECK_IMPLIES(kind == REGISTER || kind == DOUBLE_REGISTER, index >= 0);
DCHECK(IsSupportedMachineType(machine_type));
value_ |= AllocatedKindField::encode(kind);
value_ |= MachineTypeField::encode(machine_type);
value_ |= static_cast<int64_t>(index) << IndexField::kShift;
}
@@ -368,14 +380,33 @@ class AllocatedOperand : public InstructionOperand {
return AllocatedKindField::decode(value_);
}
static AllocatedOperand* New(Zone* zone, AllocatedKind kind, int index) {
return InstructionOperand::New(zone, AllocatedOperand(kind, index));
MachineType machine_type() const { return MachineTypeField::decode(value_); }
static AllocatedOperand* New(Zone* zone, AllocatedKind kind,
MachineType machine_type, int index) {
return InstructionOperand::New(zone,
AllocatedOperand(kind, machine_type, index));
}
static bool IsSupportedMachineType(MachineType machine_type) {
if (RepresentationOf(machine_type) != machine_type) return false;
switch (machine_type) {
case kRepWord32:
case kRepWord64:
case kRepFloat32:
case kRepFloat64:
case kRepTagged:
return true;
default:
return false;
}
}
INSTRUCTION_OPERAND_CASTS(AllocatedOperand, ALLOCATED);
STATIC_ASSERT(KindField::kSize == 3);
class AllocatedKindField : public BitField64<AllocatedKind, 3, 2> {};
class MachineTypeField : public BitField64<MachineType, 5, 16> {};
class IndexField : public BitField64<int32_t, 35, 29> {};
};
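// Resulting 64-bit layout, assuming the usual BitField64<type, shift, size>
// convention (shift = low bit, size = width in bits):
//   bits  0..2   KindField          (always ALLOCATED here)
//   bits  3..4   AllocatedKindField (stack slot or register, double variants)
//   bits  5..20  MachineTypeField   (the new representation payload)
//   bits 21..34  (unused)
//   bits 35..63  IndexField         (slot or register index)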
@@ -400,14 +431,17 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_IS)
#undef ALLOCATED_OPERAND_IS
// TODO(dcarney): these subkinds are now pretty useless, nuke.
#define ALLOCATED_OPERAND_CLASS(SubKind, kOperandKind) \
class SubKind##Operand final : public AllocatedOperand { \
public: \
explicit SubKind##Operand(int index) \
: AllocatedOperand(kOperandKind, index) {} \
explicit SubKind##Operand(MachineType machine_type, int index) \
: AllocatedOperand(kOperandKind, machine_type, index) {} \
\
static SubKind##Operand* New(Zone* zone, int index) { \
return InstructionOperand::New(zone, SubKind##Operand(index)); \
static SubKind##Operand* New(Zone* zone, MachineType machine_type, \
int index) { \
return InstructionOperand::New(zone, \
SubKind##Operand(machine_type, index)); \
} \
\
static SubKind##Operand* cast(InstructionOperand* op) { \
@@ -429,6 +463,24 @@ ALLOCATED_OPERAND_LIST(ALLOCATED_OPERAND_CLASS)
#undef ALLOCATED_OPERAND_CLASS
uint64_t InstructionOperand::GetValueModuloType() const {
if (IsAllocated()) {
// TODO(dcarney): put machine type last and mask.
return AllocatedOperand::MachineTypeField::update(this->value_, kMachNone);
}
return this->value_;
}
// Required for maps that don't care about machine type.
struct CompareOperandModuloType {
bool operator()(const InstructionOperand& a,
const InstructionOperand& b) const {
return a.CompareModuloType(b);
}
};
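// A small usage sketch (an illustration only, mirroring the
// std::set<InstructionOperand, CompareOperandModuloType> used in the
// parallel-move unittests later in this patch): operands that differ only in
// machine type collide under this comparator.
//
//   std::set<InstructionOperand, CompareOperandModuloType> seen;
//   seen.insert(StackSlotOperand(kRepWord32, 3));
//   seen.count(StackSlotOperand(kRepTagged, 3));  // == 1: same slot, the
//                                                 // machine type is masked out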
class MoveOperands final : public ZoneObject {
public:
MoveOperands(const InstructionOperand& source,
@@ -456,14 +508,14 @@ class MoveOperands final : public ZoneObject {
// True if this move is a move into the given destination operand.
bool Blocks(const InstructionOperand& operand) const {
return !IsEliminated() && source() == operand;
return !IsEliminated() && source().EqualsModuloType(operand);
}
// A move is redundant if it's been eliminated or if its source and
// destination are the same.
bool IsRedundant() const {
DCHECK_IMPLIES(!destination_.IsInvalid(), !destination_.IsConstant());
return IsEliminated() || source_ == destination_;
return IsEliminated() || source_.EqualsModuloType(destination_);
}
// We clear both operands to indicate a move that's been eliminated.
@@ -551,7 +603,7 @@ class ReferenceMap final : public ZoneObject {
std::ostream& operator<<(std::ostream& os, const ReferenceMap& pm);
class Instruction {
class Instruction final {
public:
size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
const InstructionOperand* OutputAt(size_t i) const {
@@ -676,10 +728,9 @@ class Instruction {
ParallelMove* const* parallel_moves() const { return &parallel_moves_[0]; }
ParallelMove** parallel_moves() { return &parallel_moves_[0]; }
protected:
private:
explicit Instruction(InstructionCode opcode);
private:
Instruction(InstructionCode opcode, size_t output_count,
InstructionOperand* outputs, size_t input_count,
InstructionOperand* inputs, size_t temp_count,
@@ -696,7 +747,6 @@ class Instruction {
ReferenceMap* reference_map_;
InstructionOperand operands_[1];
private:
DISALLOW_COPY_AND_ASSIGN(Instruction);
};
@@ -1004,11 +1054,24 @@ class InstructionSequence final : public ZoneObject {
const InstructionBlock* GetInstructionBlock(int instruction_index) const;
bool IsReference(int virtual_register) const;
bool IsDouble(int virtual_register) const;
static MachineType DefaultRepresentation() {
return kPointerSize == 8 ? kRepWord64 : kRepWord32;
}
MachineType GetRepresentation(int virtual_register) const;
void MarkAsRepresentation(MachineType machine_type, int virtual_register);
void MarkAsReference(int virtual_register);
void MarkAsDouble(int virtual_register);
bool IsReference(int virtual_register) const {
return GetRepresentation(virtual_register) == kRepTagged;
}
bool IsFloat(int virtual_register) const {
switch (GetRepresentation(virtual_register)) {
case kRepFloat32:
case kRepFloat64:
return true;
default:
return false;
}
}
Instruction* GetBlockStart(RpoNumber rpo) const;
@@ -1111,8 +1174,7 @@ class InstructionSequence final : public ZoneObject {
InstructionDeque instructions_;
int next_virtual_register_;
ReferenceMapDeque reference_maps_;
VirtualRegisterSet doubles_;
VirtualRegisterSet references_;
ZoneVector<MachineType> representations_;
DeoptimizationVector deoptimization_entries_;
DISALLOW_COPY_AND_ASSIGN(InstructionSequence);

View File

@@ -11,8 +11,18 @@ namespace compiler {
namespace {
typedef std::pair<InstructionOperand, InstructionOperand> MoveKey;
typedef ZoneMap<MoveKey, unsigned> MoveMap;
typedef ZoneSet<InstructionOperand> OperandSet;
struct MoveKeyCompare {
bool operator()(const MoveKey& a, const MoveKey& b) const {
if (a.first.EqualsModuloType(b.first)) {
return a.second.CompareModuloType(b.second);
}
return a.first.CompareModuloType(b.first);
}
};
typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
bool GapsCanMoveOver(Instruction* instr) { return instr->IsNop(); }
@@ -224,10 +234,12 @@ bool IsSlot(const InstructionOperand& op) {
bool LoadCompare(const MoveOperands* a, const MoveOperands* b) {
if (a->source() != b->source()) return a->source() < b->source();
if (!a->source().EqualsModuloType(b->source())) {
return a->source().CompareModuloType(b->source());
}
if (IsSlot(a->destination()) && !IsSlot(b->destination())) return false;
if (!IsSlot(a->destination()) && IsSlot(b->destination())) return true;
return a->destination() < b->destination();
return a->destination().CompareModuloType(b->destination());
}
} // namespace
@@ -252,7 +264,8 @@ void MoveOptimizer::FinalizeMoves(Instruction* instr) {
MoveOperands* group_begin = nullptr;
for (auto load : loads) {
// New group.
if (group_begin == nullptr || load->source() != group_begin->source()) {
if (group_begin == nullptr ||
!load->source().EqualsModuloType(group_begin->source())) {
group_begin = load;
continue;
}

View File

@@ -163,7 +163,7 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
CHECK(false);
break;
case UnallocatedOperand::NONE:
if (sequence()->IsDouble(vreg)) {
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kNoneDouble;
} else {
constraint->type_ = kNone;
@@ -178,14 +178,14 @@ void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
constraint->value_ = unallocated->fixed_register_index();
break;
case UnallocatedOperand::MUST_HAVE_REGISTER:
if (sequence()->IsDouble(vreg)) {
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kDoubleRegister;
} else {
constraint->type_ = kRegister;
}
break;
case UnallocatedOperand::MUST_HAVE_SLOT:
if (sequence()->IsDouble(vreg)) {
if (sequence()->IsFloat(vreg)) {
constraint->type_ = kDoubleSlot;
} else {
constraint->type_ = kSlot;
@@ -286,7 +286,7 @@ class PhiMap : public ZoneMap<int, PhiData*>, public ZoneObject {
struct OperandLess {
bool operator()(const InstructionOperand* a,
const InstructionOperand* b) const {
return *a < *b;
return a->CompareModuloType(*b);
}
};
@@ -320,7 +320,7 @@ class OperandMap : public ZoneObject {
this->erase(it++);
if (it == this->end()) return;
}
if (*it->first == *o.first) {
if (it->first->EqualsModuloType(*o.first)) {
++it;
if (it == this->end()) return;
} else {
@@ -372,13 +372,14 @@ class OperandMap : public ZoneObject {
}
void DropRegisters(const RegisterConfiguration* config) {
for (int i = 0; i < config->num_general_registers(); ++i) {
RegisterOperand op(i);
Drop(&op);
}
for (int i = 0; i < config->num_double_registers(); ++i) {
DoubleRegisterOperand op(i);
Drop(&op);
// TODO(dcarney): sort map by kind and drop range.
for (auto it = map().begin(); it != map().end();) {
auto op = it->first;
if (op->IsRegister() || op->IsDoubleRegister()) {
map().erase(it++);
} else {
++it;
}
}
}

View File

@@ -89,6 +89,27 @@ bool IsOutputDoubleRegisterOf(Instruction* instr, int index) {
return false;
}
// TODO(dcarney): fix frame to allow frame accesses to half-size locations.
int GetByteWidth(MachineType machine_type) {
DCHECK_EQ(RepresentationOf(machine_type), machine_type);
switch (machine_type) {
case kRepBit:
case kRepWord8:
case kRepWord16:
case kRepWord32:
case kRepTagged:
return kPointerSize;
case kRepFloat32:
case kRepWord64:
case kRepFloat64:
return 8;
default:
UNREACHABLE();
return 0;
}
}
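// Note what this encodes: per the TODO above, the frame cannot yet address
// half-size locations, so kRepFloat32 is given a full 8-byte slot and is
// treated as an 8-byte value for spill purposes.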
} // namespace
@@ -214,7 +235,7 @@ struct LiveRange::SpillAtDefinitionList : ZoneObject {
};
LiveRange::LiveRange(int id)
LiveRange::LiveRange(int id, MachineType machine_type)
: id_(id),
spill_start_index_(kMaxInt),
bits_(0),
@@ -228,8 +249,10 @@ LiveRange::LiveRange(int id)
current_interval_(nullptr),
last_processed_use_(nullptr),
current_hint_position_(nullptr) {
DCHECK(AllocatedOperand::IsSupportedMachineType(machine_type));
bits_ = SpillTypeField::encode(SpillType::kNoSpillType) |
AssignedRegisterField::encode(kUnassignedRegister);
AssignedRegisterField::encode(kUnassignedRegister) |
MachineTypeField::encode(machine_type);
}
@@ -268,6 +291,18 @@ void LiveRange::Spill() {
}
RegisterKind LiveRange::kind() const {
switch (RepresentationOf(machine_type())) {
case kRepFloat32:
case kRepFloat64:
return DOUBLE_REGISTERS;
default:
break;
}
return GENERAL_REGISTERS;
}
void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
InstructionOperand* operand) {
DCHECK(HasNoSpillType());
@@ -277,9 +312,9 @@ void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
InstructionOperand* op,
const InstructionOperand& op,
bool might_be_duplicated) {
DCHECK_IMPLIES(op->IsConstant(), spills_at_definition_ == nullptr);
DCHECK_IMPLIES(op.IsConstant(), spills_at_definition_ == nullptr);
DCHECK(!IsChild());
auto zone = sequence->zone();
for (auto to_spill = spills_at_definition_; to_spill != nullptr;
@@ -292,15 +327,15 @@ void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
bool found = false;
for (auto move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source() == *to_spill->operand &&
move_op->destination() == *op) {
if (move_op->source().Equals(*to_spill->operand) &&
move_op->destination().Equals(op)) {
found = true;
break;
}
}
if (found) continue;
}
move->AddMove(*to_spill->operand, *op);
move->AddMove(*to_spill->operand, op);
}
}
@@ -329,14 +364,6 @@ void LiveRange::SetSpillRange(SpillRange* spill_range) {
}
void LiveRange::CommitSpillOperand(AllocatedOperand* operand) {
DCHECK(HasSpillRange());
DCHECK(!IsChild());
set_spill_type(SpillType::kSpillOperand);
spill_operand_ = operand;
}
UsePosition* LiveRange::NextUsePosition(LifetimePosition start) const {
UsePosition* use_pos = last_processed_use_;
if (use_pos == nullptr || use_pos->pos() > start) {
@@ -395,18 +422,33 @@ InstructionOperand LiveRange::GetAssignedOperand() const {
DCHECK(!spilled());
switch (kind()) {
case GENERAL_REGISTERS:
return RegisterOperand(assigned_register());
return RegisterOperand(machine_type(), assigned_register());
case DOUBLE_REGISTERS:
return DoubleRegisterOperand(assigned_register());
default:
UNREACHABLE();
return DoubleRegisterOperand(machine_type(), assigned_register());
}
}
DCHECK(spilled());
DCHECK(!HasRegisterAssigned());
auto op = TopLevel()->GetSpillOperand();
DCHECK(!op->IsUnallocated());
return *op;
if (TopLevel()->HasSpillOperand()) {
auto op = TopLevel()->GetSpillOperand();
DCHECK(!op->IsUnallocated());
return *op;
}
return TopLevel()->GetSpillRangeOperand();
}
AllocatedOperand LiveRange::GetSpillRangeOperand() const {
auto spill_range = GetSpillRange();
int index = spill_range->assigned_slot();
switch (kind()) {
case GENERAL_REGISTERS:
return StackSlotOperand(machine_type(), index);
case DOUBLE_REGISTERS:
return DoubleStackSlotOperand(machine_type(), index);
}
UNREACHABLE();
return StackSlotOperand(kMachNone, 0);
}
@@ -512,7 +554,6 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
// Link the new live range in the chain before any of the other
// ranges linked from the range before the split.
result->parent_ = (parent_ == nullptr) ? this : parent_;
result->set_kind(result->parent_->kind());
result->next_ = next_;
next_ = result;
@@ -626,15 +667,14 @@ void LiveRange::AddUsePosition(UsePosition* use_pos) {
void LiveRange::ConvertUsesToOperand(const InstructionOperand& op,
InstructionOperand* spill_op) {
const InstructionOperand& spill_op) {
for (auto pos = first_pos(); pos != nullptr; pos = pos->next()) {
DCHECK(Start() <= pos->pos() && pos->pos() <= End());
if (!pos->HasOperand()) continue;
switch (pos->type()) {
case UsePositionType::kRequiresSlot:
if (spill_op != nullptr) {
InstructionOperand::ReplaceWith(pos->operand(), spill_op);
}
DCHECK(spill_op.IsStackSlot() || spill_op.IsDoubleStackSlot());
InstructionOperand::ReplaceWith(pos->operand(), &spill_op);
break;
case UsePositionType::kRequiresRegister:
DCHECK(op.IsRegister() || op.IsDoubleRegister());
@@ -726,7 +766,8 @@ static bool AreUseIntervalsIntersecting(UseInterval* interval1,
}
SpillRange::SpillRange(LiveRange* parent, Zone* zone) : live_ranges_(zone) {
SpillRange::SpillRange(LiveRange* parent, Zone* zone)
: live_ranges_(zone), assigned_slot_(kUnassignedSlot) {
DCHECK(!parent->IsChild());
UseInterval* result = nullptr;
UseInterval* node = nullptr;
@@ -752,6 +793,11 @@ SpillRange::SpillRange(LiveRange* parent, Zone* zone) : live_ranges_(zone) {
}
int SpillRange::ByteWidth() const {
return GetByteWidth(live_ranges_[0]->machine_type());
}
bool SpillRange::IsIntersectingWith(SpillRange* other) const {
if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
this->End() <= other->use_interval_->start() ||
@@ -763,7 +809,11 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
bool SpillRange::TryMerge(SpillRange* other) {
if (kind() != other->kind() || IsIntersectingWith(other)) return false;
// TODO(dcarney): byte widths should be compared here not kinds.
if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
IsIntersectingWith(other)) {
return false;
}
auto max = LifetimePosition::MaxPosition();
if (End() < other->End() && other->End() != max) {
@@ -787,14 +837,6 @@ bool SpillRange::TryMerge(SpillRange* other) {
}
void SpillRange::SetOperand(AllocatedOperand* op) {
for (auto range : live_ranges()) {
DCHECK(range->GetSpillRange() == this);
range->CommitSpillOperand(op);
}
}
void SpillRange::MergeDisjointIntervals(UseInterval* other) {
UseInterval* tail = nullptr;
auto current = use_interval_;
@@ -861,7 +903,8 @@ RegisterAllocationData::RegisterAllocationData(
allocation_zone()),
spill_ranges_(allocation_zone()),
assigned_registers_(nullptr),
assigned_double_registers_(nullptr) {
assigned_double_registers_(nullptr),
virtual_register_count_(code->VirtualRegisterCount()) {
DCHECK(this->config()->num_general_registers() <=
RegisterConfiguration::kMaxGeneralRegisters);
DCHECK(this->config()->num_double_registers() <=
@@ -876,19 +919,6 @@ RegisterAllocationData::RegisterAllocationData(
}
LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
auto result = live_ranges()[index];
if (result == nullptr) {
result = NewLiveRange(index);
live_ranges()[index] = result;
}
return result;
}
MoveOperands* RegisterAllocationData::AddGapMove(
int index, Instruction::GapPosition position,
const InstructionOperand& from, const InstructionOperand& to) {
@@ -898,8 +928,40 @@ MoveOperands* RegisterAllocationData::AddGapMove(
}
LiveRange* RegisterAllocationData::NewLiveRange(int index) {
return new (allocation_zone()) LiveRange(index);
MachineType RegisterAllocationData::MachineTypeFor(int virtual_register) {
DCHECK_LT(virtual_register, code()->VirtualRegisterCount());
return code()->GetRepresentation(virtual_register);
}
LiveRange* RegisterAllocationData::LiveRangeFor(int index) {
if (index >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(index + 1, nullptr);
}
auto result = live_ranges()[index];
if (result == nullptr) {
result = NewLiveRange(index, MachineTypeFor(index));
live_ranges()[index] = result;
}
return result;
}
LiveRange* RegisterAllocationData::NewLiveRange(int index,
MachineType machine_type) {
return new (allocation_zone()) LiveRange(index, machine_type);
}
LiveRange* RegisterAllocationData::NewChildRangeFor(LiveRange* range) {
int vreg = virtual_register_count_++;
if (vreg >= static_cast<int>(live_ranges().size())) {
live_ranges().resize(vreg + 1, nullptr);
}
auto child = new (allocation_zone()) LiveRange(vreg, range->machine_type());
DCHECK_NULL(live_ranges()[vreg]);
live_ranges()[vreg] = child;
return child;
}
@@ -972,15 +1034,21 @@ InstructionOperand* ConstraintBuilder::AllocateFixed(
TRACE("Allocating fixed reg for op %d\n", operand->virtual_register());
DCHECK(operand->HasFixedPolicy());
InstructionOperand allocated;
MachineType machine_type = InstructionSequence::DefaultRepresentation();
int virtual_register = operand->virtual_register();
if (virtual_register != InstructionOperand::kInvalidVirtualRegister) {
machine_type = data()->MachineTypeFor(virtual_register);
}
if (operand->HasFixedSlotPolicy()) {
allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT,
allocated = AllocatedOperand(AllocatedOperand::STACK_SLOT, machine_type,
operand->fixed_slot_index());
} else if (operand->HasFixedRegisterPolicy()) {
allocated = AllocatedOperand(AllocatedOperand::REGISTER,
allocated = AllocatedOperand(AllocatedOperand::REGISTER, machine_type,
operand->fixed_register_index());
} else if (operand->HasFixedDoubleRegisterPolicy()) {
DCHECK_NE(InstructionOperand::kInvalidVirtualRegister, virtual_register);
allocated = AllocatedOperand(AllocatedOperand::DOUBLE_REGISTER,
operand->fixed_register_index());
machine_type, operand->fixed_register_index());
} else {
UNREACHABLE();
}
@@ -1248,9 +1316,9 @@ LiveRange* LiveRangeBuilder::FixedLiveRangeFor(int index) {
DCHECK(index < config()->num_general_registers());
auto result = data()->fixed_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedLiveRangeID(index));
result = data()->NewLiveRange(FixedLiveRangeID(index),
InstructionSequence::DefaultRepresentation());
DCHECK(result->IsFixed());
result->set_kind(GENERAL_REGISTERS);
result->set_assigned_register(index);
data()->MarkAllocated(GENERAL_REGISTERS, index);
data()->fixed_live_ranges()[index] = result;
@@ -1263,9 +1331,8 @@ LiveRange* LiveRangeBuilder::FixedDoubleLiveRangeFor(int index) {
DCHECK(index < config()->num_aliased_double_registers());
auto result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedDoubleLiveRangeID(index));
result = data()->NewLiveRange(FixedDoubleLiveRangeID(index), kRepFloat64);
DCHECK(result->IsFixed());
result->set_kind(DOUBLE_REGISTERS);
result->set_assigned_register(index);
data()->MarkAllocated(DOUBLE_REGISTERS, index);
data()->fixed_double_live_ranges()[index] = result;
@@ -1565,7 +1632,6 @@ void LiveRangeBuilder::BuildLiveRanges() {
// Postprocess the ranges.
for (auto range : data()->live_ranges()) {
if (range == nullptr) continue;
range->set_kind(RequiredRegisterKind(range->id()));
// Give slots to all ranges with a non fixed slot use.
if (range->has_slot_use() && range->HasNoSpillType()) {
data()->AssignSpillRangeToLiveRange(range);
@@ -1610,13 +1676,6 @@ void LiveRangeBuilder::ResolvePhiHint(InstructionOperand* operand,
}
RegisterKind LiveRangeBuilder::RequiredRegisterKind(
int virtual_register) const {
return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
: GENERAL_REGISTERS;
}
void LiveRangeBuilder::Verify() const {
for (auto& hint : phi_hints_) {
CHECK(hint.second->IsResolved());
@@ -1647,8 +1706,7 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
(GetInstructionBlock(code(), pos)->last_instruction_index() !=
pos.ToInstructionIndex()));
int vreg = code()->NextVirtualRegister();
auto result = LiveRangeFor(vreg);
auto result = data()->NewChildRangeFor(range);
range->SplitAt(pos, result, allocation_zone());
return result;
}
@@ -2652,13 +2710,9 @@ void OperandAssigner::AssignSpillSlots() {
for (auto range : spill_ranges) {
if (range->IsEmpty()) continue;
// Allocate a new operand referring to the spill slot.
auto kind = range->kind();
int index = data()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
auto op_kind = kind == DOUBLE_REGISTERS
? AllocatedOperand::DOUBLE_STACK_SLOT
: AllocatedOperand::STACK_SLOT;
auto op = AllocatedOperand::New(data()->code_zone(), op_kind, index);
range->SetOperand(op);
int byte_width = range->ByteWidth();
int index = data()->frame()->AllocateSpillSlot(byte_width);
range->set_assigned_slot(index);
}
}
@@ -2666,16 +2720,18 @@ void OperandAssigner::AssignSpillSlots() {
void OperandAssigner::CommitAssignment() {
for (auto range : data()->live_ranges()) {
if (range == nullptr || range->IsEmpty()) continue;
InstructionOperand* spill_operand = nullptr;
if (!range->TopLevel()->HasNoSpillType()) {
spill_operand = range->TopLevel()->GetSpillOperand();
InstructionOperand spill_operand;
if (range->TopLevel()->HasSpillOperand()) {
spill_operand = *range->TopLevel()->GetSpillOperand();
} else if (range->TopLevel()->HasSpillRange()) {
spill_operand = range->TopLevel()->GetSpillRangeOperand();
}
auto assigned = range->GetAssignedOperand();
range->ConvertUsesToOperand(assigned, spill_operand);
if (range->is_phi()) {
data()->GetPhiMapValueFor(range->id())->CommitAssignment(assigned);
}
if (!range->IsChild() && spill_operand != nullptr) {
if (!range->IsChild() && !spill_operand.IsInvalid()) {
range->CommitSpillsAtDefinition(data()->code(), spill_operand,
range->has_slot_use());
}
@@ -2756,12 +2812,21 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Check if the live range is spilled and the safe point is after
// the spill position.
if (range->HasSpillOperand() &&
safe_point >= range->spill_start_index() &&
!range->GetSpillOperand()->IsConstant()) {
if (((range->HasSpillOperand() &&
!range->GetSpillOperand()->IsConstant()) ||
range->HasSpillRange()) &&
safe_point >= range->spill_start_index()) {
TRACE("Pointer for range %d (spilled at %d) at safe point %d\n",
range->id(), range->spill_start_index(), safe_point);
map->RecordReference(*range->GetSpillOperand());
InstructionOperand operand;
if (range->HasSpillOperand()) {
operand = *range->GetSpillOperand();
} else {
operand = range->GetSpillRangeOperand();
}
DCHECK(operand.IsStackSlot());
DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
map->RecordReference(operand);
}
if (!cur->spilled()) {
@@ -2771,6 +2836,7 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
cur->id(), cur->Start().value(), safe_point);
auto operand = cur->GetAssignedOperand();
DCHECK(!operand.IsStackSlot());
DCHECK_EQ(kRepTagged, AllocatedOperand::cast(operand).machine_type());
map->RecordReference(operand);
}
}
@@ -2909,6 +2975,24 @@ class LiveRangeFinder {
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
struct DelayedInsertionMapCompare {
bool operator()(const DelayedInsertionMapKey& a,
const DelayedInsertionMapKey& b) const {
if (a.first == b.first) {
return a.second.Compare(b.second);
}
return a.first < b.first;
}
};
typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
DelayedInsertionMapCompare> DelayedInsertionMap;
} // namespace
@@ -2942,7 +3026,7 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
continue;
auto pred_op = result.pred_cover_->GetAssignedOperand();
auto cur_op = result.cur_cover_->GetAssignedOperand();
if (pred_op == cur_op) continue;
if (pred_op.Equals(cur_op)) continue;
ResolveControlFlow(block, cur_op, pred_block, pred_op);
}
iterator.Advance();
@@ -2955,7 +3039,7 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
const InstructionOperand& cur_op,
const InstructionBlock* pred,
const InstructionOperand& pred_op) {
DCHECK(pred_op != cur_op);
DCHECK(!pred_op.Equals(cur_op));
int gap_index;
Instruction::GapPosition position;
if (block->PredecessorCount() == 1) {
@@ -2974,8 +3058,7 @@ void LiveRangeConnector::ResolveControlFlow(const InstructionBlock* block,
void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
ZoneMap<std::pair<ParallelMove*, InstructionOperand>, InstructionOperand>
delayed_insertion_map(local_zone);
DelayedInsertionMap delayed_insertion_map(local_zone);
for (auto first_range : data()->live_ranges()) {
if (first_range == nullptr || first_range->IsChild()) continue;
for (auto second_range = first_range->next(); second_range != nullptr;
@@ -2991,7 +3074,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
auto prev_operand = first_range->GetAssignedOperand();
auto cur_operand = second_range->GetAssignedOperand();
if (prev_operand == cur_operand) continue;
if (prev_operand.Equals(cur_operand)) continue;
bool delay_insertion = false;
Instruction::GapPosition gap_pos;
int gap_index = pos.ToInstructionIndex();

View File

@@ -13,7 +13,6 @@ namespace internal {
namespace compiler {
enum RegisterKind {
UNALLOCATED_REGISTERS,
GENERAL_REGISTERS,
DOUBLE_REGISTERS
};
@@ -272,7 +271,7 @@ class SpillRange;
// intervals over the instruction ordering.
class LiveRange final : public ZoneObject {
public:
explicit LiveRange(int id);
explicit LiveRange(int id, MachineType machine_type);
UseInterval* first_interval() const { return first_interval_; }
UsePosition* first_pos() const { return first_pos_; }
@@ -289,6 +288,8 @@ class LiveRange final : public ZoneObject {
InstructionOperand GetAssignedOperand() const;
int spill_start_index() const { return spill_start_index_; }
MachineType machine_type() const { return MachineTypeField::decode(bits_); }
int assigned_register() const { return AssignedRegisterField::decode(bits_); }
bool HasRegisterAssigned() const {
return assigned_register() != kUnassignedRegister;
@@ -299,10 +300,7 @@ class LiveRange final : public ZoneObject {
bool spilled() const { return SpilledField::decode(bits_); }
void Spill();
RegisterKind kind() const { return RegisterKindField::decode(bits_); }
void set_kind(RegisterKind kind) {
bits_ = RegisterKindField::update(bits_, kind);
}
RegisterKind kind() const;
// Correct only for parent.
bool is_phi() const { return IsPhiField::decode(bits_); }
@@ -386,14 +384,14 @@ class LiveRange final : public ZoneObject {
return spill_type() == SpillType::kSpillOperand;
}
bool HasSpillRange() const { return spill_type() == SpillType::kSpillRange; }
AllocatedOperand GetSpillRangeOperand() const;
void SpillAtDefinition(Zone* zone, int gap_index,
InstructionOperand* operand);
void SetSpillOperand(InstructionOperand* operand);
void SetSpillRange(SpillRange* spill_range);
void CommitSpillOperand(AllocatedOperand* operand);
void CommitSpillsAtDefinition(InstructionSequence* sequence,
InstructionOperand* operand,
const InstructionOperand& operand,
bool might_be_duplicated);
void SetSpillStartIndex(int start) {
@@ -416,7 +414,7 @@ class LiveRange final : public ZoneObject {
void Verify() const;
void ConvertUsesToOperand(const InstructionOperand& op,
InstructionOperand* spill_op);
const InstructionOperand& spill_op);
void SetUseHints(int register_index);
void UnsetUseHints() { SetUseHints(kUnassignedRegister); }
@@ -437,9 +435,9 @@ class LiveRange final : public ZoneObject {
typedef BitField<bool, 1, 1> HasSlotUseField;
typedef BitField<bool, 2, 1> IsPhiField;
typedef BitField<bool, 3, 1> IsNonLoopPhiField;
typedef BitField<RegisterKind, 4, 2> RegisterKindField;
typedef BitField<SpillType, 6, 2> SpillTypeField;
typedef BitField<int32_t, 8, 6> AssignedRegisterField;
typedef BitField<SpillType, 4, 2> SpillTypeField;
typedef BitField<int32_t, 6, 6> AssignedRegisterField;
typedef BitField<MachineType, 12, 15> MachineTypeField;
int id_;
int spill_start_index_;
@@ -468,13 +466,23 @@ class LiveRange final : public ZoneObject {
class SpillRange final : public ZoneObject {
public:
static const int kUnassignedSlot = -1;
SpillRange(LiveRange* range, Zone* zone);
UseInterval* interval() const { return use_interval_; }
RegisterKind kind() const { return live_ranges_[0]->kind(); }
// Currently, only 4 or 8 byte slots are supported.
int ByteWidth() const;
bool IsEmpty() const { return live_ranges_.empty(); }
bool TryMerge(SpillRange* other);
void SetOperand(AllocatedOperand* op);
void set_assigned_slot(int index) {
DCHECK_EQ(kUnassignedSlot, assigned_slot_);
assigned_slot_ = index;
}
int assigned_slot() {
DCHECK_NE(kUnassignedSlot, assigned_slot_);
return assigned_slot_;
}
private:
LifetimePosition End() const { return end_position_; }
@@ -486,6 +494,7 @@ class SpillRange final : public ZoneObject {
ZoneVector<LiveRange*> live_ranges_;
UseInterval* use_interval_;
LifetimePosition end_position_;
int assigned_slot_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
@@ -549,7 +558,12 @@ class RegisterAllocationData final : public ZoneObject {
const char* debug_name() const { return debug_name_; }
const RegisterConfiguration* config() const { return config_; }
MachineType MachineTypeFor(int virtual_register);
LiveRange* LiveRangeFor(int index);
// Creates a new live range.
LiveRange* NewLiveRange(int index, MachineType machine_type);
LiveRange* NewChildRangeFor(LiveRange* range);
SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
@@ -563,9 +577,6 @@ class RegisterAllocationData final : public ZoneObject {
bool ExistsUseWithoutDefinition();
// Creates a new live range.
LiveRange* NewLiveRange(int index);
void MarkAllocated(RegisterKind kind, int index);
PhiMapValue* InitializePhiMap(const InstructionBlock* block,
@@ -586,6 +597,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<SpillRange*> spill_ranges_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
int virtual_register_count_;
DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
};
@@ -664,9 +676,6 @@ class LiveRangeBuilder final : public ZoneObject {
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
// Returns the register kind required by the given virtual register.
RegisterKind RequiredRegisterKind(int virtual_register) const;
UsePosition* NewUsePosition(LifetimePosition pos, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type);
UsePosition* NewUsePosition(LifetimePosition pos) {

View File

@@ -89,7 +89,8 @@ class InterpreterState {
if (key.is_constant) {
return ConstantOperand(key.index);
}
return AllocatedOperand(key.kind, key.index);
return AllocatedOperand(
key.kind, InstructionSequence::DefaultRepresentation(), key.index);
}
friend std::ostream& operator<<(std::ostream& os,
@@ -148,7 +149,7 @@ class ParallelMoveCreator : public HandleAndZoneScope {
ParallelMove* Create(int size) {
ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
std::set<InstructionOperand> seen;
std::set<InstructionOperand, CompareOperandModuloType> seen;
for (int i = 0; i < size; ++i) {
MoveOperands mo(CreateRandomOperand(true), CreateRandomOperand(false));
if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
@@ -160,18 +161,38 @@ class ParallelMoveCreator : public HandleAndZoneScope {
}
private:
MachineType RandomType() {
int index = rng_->NextInt(3);
switch (index) {
case 0:
return kRepWord32;
case 1:
return kRepWord64;
case 2:
return kRepTagged;
}
UNREACHABLE();
return kMachNone;
}
MachineType RandomDoubleType() {
int index = rng_->NextInt(2);
if (index == 0) return kRepFloat64;
return kRepFloat32;
}
InstructionOperand CreateRandomOperand(bool is_source) {
int index = rng_->NextInt(6);
// destination can't be Constant.
switch (rng_->NextInt(is_source ? 5 : 4)) {
case 0:
return StackSlotOperand(index);
return StackSlotOperand(RandomType(), index);
case 1:
return DoubleStackSlotOperand(index);
return DoubleStackSlotOperand(RandomDoubleType(), index);
case 2:
return RegisterOperand(index);
return RegisterOperand(RandomType(), index);
case 3:
return DoubleRegisterOperand(index);
return DoubleRegisterOperand(RandomDoubleType(), index);
case 4:
return ConstantOperand(index);
}

View File

@@ -263,8 +263,8 @@ TEST(InstructionAddGapMove) {
CHECK(move);
CHECK_EQ(1u, move->size());
MoveOperands* cur = move->at(0);
CHECK(op1 == cur->source());
CHECK(op2 == cur->destination());
CHECK(op1.Equals(cur->source()));
CHECK(op2.Equals(cur->destination()));
}
}
@@ -308,15 +308,15 @@ TEST(InstructionOperands) {
CHECK(k == m->TempCount());
for (size_t z = 0; z < i; z++) {
CHECK(outputs[z] == *m->OutputAt(z));
CHECK(outputs[z].Equals(*m->OutputAt(z)));
}
for (size_t z = 0; z < j; z++) {
CHECK(inputs[z] == *m->InputAt(z));
CHECK(inputs[z].Equals(*m->InputAt(z)));
}
for (size_t z = 0; z < k; z++) {
CHECK(temps[z] == *m->TempAt(z));
CHECK(temps[z].Equals(*m->TempAt(z)));
}
}
}

View File

@@ -59,13 +59,14 @@ class TestCode : public HandleAndZoneScope {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, RegisterOperand(13), RegisterOperand(13));
AddGapMove(index, RegisterOperand(kRepWord32, 13),
RegisterOperand(kRepWord32, 13));
}
void NonRedundantMoves() {
Start();
sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
int index = static_cast<int>(sequence_.instructions().size()) - 1;
AddGapMove(index, ConstantOperand(11), RegisterOperand(11));
AddGapMove(index, ConstantOperand(11), RegisterOperand(kRepWord32, 11));
}
void Other() {
Start();

View File

@@ -96,12 +96,12 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
}
for (auto i : s.virtual_registers_) {
int const virtual_register = i.second;
if (sequence.IsDouble(virtual_register)) {
if (sequence.IsFloat(virtual_register)) {
EXPECT_FALSE(sequence.IsReference(virtual_register));
s.doubles_.insert(virtual_register);
}
if (sequence.IsReference(virtual_register)) {
EXPECT_FALSE(sequence.IsDouble(virtual_register));
EXPECT_FALSE(sequence.IsFloat(virtual_register));
s.references_.insert(virtual_register);
}
}

View File

@@ -33,7 +33,7 @@ class MoveOptimizerTest : public InstructionSequenceTest {
auto to = ConvertMoveArg(to_op);
for (auto move : *moves) {
if (move->IsRedundant()) continue;
if (move->source() == from && move->destination() == to) {
if (move->source().Equals(from) && move->destination().Equals(to)) {
return true;
}
}
@@ -67,10 +67,10 @@ class MoveOptimizerTest : public InstructionSequenceTest {
case kConstant:
return ConstantOperand(op.value_);
case kFixedSlot:
return StackSlotOperand(op.value_);
return StackSlotOperand(kRepWord32, op.value_);
case kFixedRegister:
CHECK(0 <= op.value_ && op.value_ < num_general_registers());
return RegisterOperand(op.value_);
return RegisterOperand(kRepWord32, op.value_);
default:
break;
}