[turbofan] regalloc: model context and function mark as reg-defined.

If we model them as memory operands ("SpillOperands"), as we
currently do, the register allocator treats them as being defined
in memory, so spilling them anywhere up to the first use that requires
them in a register is free.

That's not the case for the context and the function marker. They come
in registers, and frame construction also pushes them on the stack.
This conflicts with the goals of frame elision: the allocator should avoid
eagerly spilling them, since that would force a frame construction;
moreover, should frame elision succeed for the first block, they are
never spilled at all, which makes modeling them as spill operands
incorrect.
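
To make the cost asymmetry concrete, here is a toy sketch of how the two
models price the first spill (hypothetical names, for illustration only;
this is not V8 code):

  // Toy stand-in for a live range; real ranges are TopLevelLiveRange.
  struct ToyRange {
    bool defined_in_memory;  // SpillOperand model: born in its stack slot.
  };

  // Stores the allocator thinks it must emit so the value reaches its slot.
  int PerceivedSpillCost(const ToyRange& range) {
    // Memory-defined: the slot already holds the value, so spilling costs
    // nothing. Register-defined: a real store has to be placed -- and, with
    // frame construction unchanged, the prologue stores the value again.
    return range.defined_in_memory ? 0 : 1;
  }

Under the old model the allocator therefore spills these values eagerly,
which is exactly what frame elision needs to avoid.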

The natural choice would be to fully decouple their spilling from frame
construction, and let the register allocator spill them. That means they
need to be presented to the register allocator as vanilla live ranges,
with pre-assigned spill slots.
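
A minimal sketch of that bookkeeping, with toy types (in the actual change
the pairs are collected in RegisterAllocationData::preassigned_slot_ranges()
and the slots land on SpillRange objects, as the diffs below show):

  #include <utility>
  #include <vector>

  constexpr int kUnassignedSlot = -1;

  struct ToySpillRange {
    int assigned_slot = kUnassignedSlot;
    bool HasSlot() const { return assigned_slot != kUnassignedSlot; }
  };

  // Filled in while meeting constraints: ranges whose spill slot is fixed
  // up front -- here, the context and the function marker.
  std::vector<std::pair<ToySpillRange*, int>> preassigned_slot_ranges;

  // General slot allocation must then skip ranges that already own a slot
  // instead of handing them a fresh one.
  void AssignSpillSlots(const std::vector<ToySpillRange*>& spill_ranges,
                        int* next_free_slot) {
    for (ToySpillRange* range : spill_ranges) {
      if (range->HasSlot()) continue;  // pre-assigned: leave it alone
      range->assigned_slot = (*next_free_slot)++;
    }
  }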

The main challenge there is that not all instructions (stack checks,
mainly) make explicit their dependency on these ranges being spilled.
In this change, we therefore switch the model but leave frame
construction as-is. This has the benefit of unblocking frame elision,
but the drawback that we may see double spills when these live ranges
spill only in deferred blocks. I plan to enable frame elision next, and
to tackle the double-spill issue after that.
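
To illustrate the drawback: with frame construction untouched, the prologue
still stores the context and function marker, and a range that spills only
in a deferred block gets a second, allocator-inserted store of the same
value (a toy accounting, not V8 code):

  // One value, two stores: the prologue push plus the allocator's spill
  // move in the deferred block -- the "double spill" tolerated for now.
  struct ToyStores {
    int frame_construction_pushes = 1;  // prologue push, unchanged here
    int allocator_spill_moves = 1;      // spill move in the deferred block
  };

  int TotalStores(const ToyStores& s) {
    return s.frame_construction_pushes + s.allocator_spill_moves;  // == 2
  }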

BUG= v8:4533
LOG=N

Review URL: https://codereview.chromium.org/1501363002

Cr-Commit-Position: refs/heads/master@{#32775}
Author: mtrofin <2015-12-10 18:43:50 -08:00>, committed by Commit bot
Commit: 0b1261439b (parent: 8b968b70e9)
2 changed files with 29 additions and 13 deletions


@@ -767,7 +767,9 @@ void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
       }
       if (found) continue;
     }
-    move->AddMove(*to_spill->operand, op);
+    if (!has_preassigned_slot()) {
+      move->AddMove(*to_spill->operand, op);
+    }
   }
 }
@@ -1126,6 +1128,7 @@ bool SpillRange::IsIntersectingWith(SpillRange* other) const {
 bool SpillRange::TryMerge(SpillRange* other) {
+  if (HasSlot() || other->HasSlot()) return false;
   // TODO(dcarney): byte widths should be compared here not kinds.
   if (live_ranges_[0]->kind() != other->live_ranges_[0]->kind() ||
       IsIntersectingWith(other)) {
@@ -1227,7 +1230,8 @@ RegisterAllocationData::RegisterAllocationData(
       delayed_references_(allocation_zone()),
       assigned_registers_(nullptr),
       assigned_double_registers_(nullptr),
-      virtual_register_count_(code->VirtualRegisterCount()) {
+      virtual_register_count_(code->VirtualRegisterCount()),
+      preassigned_slot_ranges_(zone) {
   DCHECK(this->config()->num_general_registers() <=
          RegisterConfiguration::kMaxGeneralRegisters);
   DCHECK(this->config()->num_double_registers() <=
@@ -1620,14 +1624,8 @@ void ConstraintBuilder::MeetConstraintsAfter(int instr_index) {
       bool is_tagged = code()->IsReference(output_vreg);
       if (first_output->HasSecondaryStorage()) {
         range->MarkHasPreassignedSlot();
-        InstructionOperand* spill_op = AllocatedOperand::New(
-            data()->code_zone(), LocationOperand::LocationKind::STACK_SLOT,
-            range->representation(), first_output->GetSecondaryStorage());
-        range->RecordSpillLocation(allocation_zone(), instr_index + 1,
-                                   first_output);
-        range->SetSpillOperand(spill_op);
-        range->SetSpillStartIndex(instr_index + 1);
-        assigned = true;
+        data()->preassigned_slot_ranges().push_back(
+            std::make_pair(range, first_output->GetSecondaryStorage()));
       }
       AllocateFixed(first_output, instr_index, is_tagged);
@@ -2159,6 +2157,14 @@ void LiveRangeBuilder::BuildLiveRanges() {
       }
     }
   }
+  for (auto preassigned : data()->preassigned_slot_ranges()) {
+    TopLevelLiveRange* range = preassigned.first;
+    int slot_id = preassigned.second;
+    SpillRange* spill = range->HasSpillRange()
+                            ? range->GetSpillRange()
+                            : data()->AssignSpillRangeToLiveRange(range);
+    spill->set_assigned_slot(slot_id);
+  }
 #ifdef DEBUG
   Verify();
 #endif
@@ -2978,9 +2984,11 @@ void OperandAssigner::AssignSpillSlots() {
   for (SpillRange* range : spill_ranges) {
     if (range == nullptr || range->IsEmpty()) continue;
     // Allocate a new operand referring to the spill slot.
-    int byte_width = range->ByteWidth();
-    int index = data()->frame()->AllocateSpillSlot(byte_width);
-    range->set_assigned_slot(index);
+    if (!range->HasSlot()) {
+      int byte_width = range->ByteWidth();
+      int index = data()->frame()->AllocateSpillSlot(byte_width);
+      range->set_assigned_slot(index);
+    }
   }
 }


@@ -672,6 +672,7 @@ class SpillRange final : public ZoneObject {
   int ByteWidth() const;
   bool IsEmpty() const { return live_ranges_.empty(); }
   bool TryMerge(SpillRange* other);
+  bool HasSlot() const { return assigned_slot_ != kUnassignedSlot; }
   void set_assigned_slot(int index) {
     DCHECK_EQ(kUnassignedSlot, assigned_slot_);
@@ -738,6 +739,8 @@ class RegisterAllocationData final : public ZoneObject {
     InstructionOperand* operand;
   };
   typedef ZoneVector<DelayedReference> DelayedReferences;
+  typedef ZoneVector<std::pair<TopLevelLiveRange*, int>>
+      RangesWithPreassignedSlots;
 
   RegisterAllocationData(const RegisterConfiguration* config,
                          Zone* allocation_zone, Frame* frame,
@@ -804,6 +807,10 @@ class RegisterAllocationData final : public ZoneObject {
   PhiMapValue* GetPhiMapValueFor(int virtual_register);
   bool IsBlockBoundary(LifetimePosition pos) const;
+  RangesWithPreassignedSlots& preassigned_slot_ranges() {
+    return preassigned_slot_ranges_;
+  }
   void Print(const InstructionSequence* instructionSequence);
   void Print(const Instruction* instruction);
   void Print(const LiveRange* range, bool with_children = false);
@@ -832,6 +839,7 @@ class RegisterAllocationData final : public ZoneObject {
   BitVector* assigned_registers_;
   BitVector* assigned_double_registers_;
   int virtual_register_count_;
+  RangesWithPreassignedSlots preassigned_slot_ranges_;
 
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocationData);
 };