[Turbofan] Merge SpillRanges by byte_width rather than kind.

- Uses byte_width() instead of RegisterKind to decide whether two spill ranges can be merged, removing the SpillRange::kind_ field.
- Modifies InstructionOperand canonicalization so representation is only considered for FP registers, not stack slots.

LOG=N
BUG=v8:4124

Review-Url: https://codereview.chromium.org/2074323002
Cr-Commit-Position: refs/heads/master@{#37463}
This commit is contained in:
bbudge 2016-06-30 17:56:00 -07:00 committed by Commit bot
parent 2907c726b2
commit 55841409c0
4 changed files with 11 additions and 17 deletions

View File

@ -581,9 +581,9 @@ void GraphC1Visualizer::PrintLiveRange(const LiveRange* range, const char* type,
<< "\"";
} else {
index = AllocatedOperand::cast(top->GetSpillOperand())->index();
if (top->kind() == FP_REGISTERS) {
os_ << " \"double_stack:" << index << "\"";
} else if (top->kind() == GENERAL_REGISTERS) {
if (IsFloatingPoint(top->representation())) {
os_ << " \"fp_stack:" << index << "\"";
} else {
os_ << " \"stack:" << index << "\"";
}
}

View File

@ -605,16 +605,15 @@ bool InstructionOperand::IsSimd128StackSlot() const {
uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAllocated() || IsExplicit()) {
MachineRepresentation rep = LocationOperand::cast(this)->representation();
MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFloatingPoint(rep)) {
if (IsFPRegister()) {
if (kSimpleFPAliasing) {
// Archs with simple aliasing can treat all FP operands the same.
// We treat all FP register operands the same for simple aliasing.
canonical = MachineRepresentation::kFloat64;
} else {
// We need to distinguish FP operands of different reps when FP
// We need to distinguish FP register operands of different reps when
// aliasing is not simple (e.g. ARM).
canonical = rep;
canonical = LocationOperand::cast(this)->representation();
}
}
return InstructionOperand::KindField::update(

View File

@ -1191,12 +1191,10 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
SpillRange::SpillRange(TopLevelLiveRange* parent, Zone* zone)
: live_ranges_(zone),
assigned_slot_(kUnassignedSlot),
byte_width_(GetByteWidth(parent->representation())),
kind_(parent->kind()) {
byte_width_(GetByteWidth(parent->representation())) {
// Spill ranges are created for top level, non-splintered ranges. This is so
// that, when merging decisions are made, we consider the full extent of the
// virtual register, and avoid clobbering it.
@ -1235,11 +1233,8 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
bool SpillRange::TryMerge(SpillRange* other) {
if (HasSlot() || other->HasSlot()) return false;
// TODO(dcarney): byte widths should be compared here not kinds.
if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
IsIntersectingWith(other)) {
if (byte_width() != other->byte_width() || IsIntersectingWith(other))
return false;
}
LifetimePosition max = LifetimePosition::MaxPosition();
if (End() < other->End() && other->End() != max) {

View File

@ -695,8 +695,9 @@ class SpillRange final : public ZoneObject {
return live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& live_ranges() { return live_ranges_; }
// Currently, only 4 or 8 byte slots are supported in stack frames.
// TODO(bbudge) Add 16 byte slots for SIMD.
int byte_width() const { return byte_width_; }
RegisterKind kind() const { return kind_; }
void Print() const;
private:
@ -710,7 +711,6 @@ class SpillRange final : public ZoneObject {
LifetimePosition end_position_;
int assigned_slot_;
int byte_width_;
RegisterKind kind_;
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};