[turbofan] Optimize spills in deferred blocks

Up to now, for ranges spilled in deferred blocks, we would spill every
time a range switched from using a register to using its spill slot.
Such spills can be redundant, leading to avoidable code size cost.

This change addresses the issue by performing the spills as early as
possible: at the start of the deferred blocks that dominate the
locations where the value is needed on the stack.
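
For orientation, the core of the change is a small worklist pass
(LiveRangeConnector::CommitSpillsInDeferredBlocks in the diff below): it
collects the deferred blocks that need the value on the stack, walks their
predecessors while they stay in the deferred region, and inserts a single
spill at the start of each deferred block that is entered from non-deferred
code. The following is a minimal standalone sketch of that idea, assuming
simplified types; Block, SpillInDeferredDominators and spill_here are
hypothetical names, not the V8 API:

  #include <functional>
  #include <queue>
  #include <set>
  #include <vector>

  // Simplified stand-in for V8's InstructionBlock (id is the RPO number).
  struct Block {
    int id;
    bool deferred;                  // True for deferred (rarely executed) blocks.
    std::vector<int> predecessors;  // RPO numbers of predecessor blocks.
  };

  // Given the deferred blocks known to need the value on the stack (reload
  // sites and slot uses), walk predecessors through the deferred region and
  // spill once at the start of every deferred block that non-deferred code
  // jumps into. blocks is indexed by block id.
  void SpillInDeferredDominators(const std::vector<Block>& blocks,
                                 const std::set<int>& blocks_needing_spill,
                                 const std::function<void(int)>& spill_here) {
    std::queue<int> worklist;
    for (int id : blocks_needing_spill) worklist.push(id);
    std::set<int> done;
    while (!worklist.empty()) {
      int id = worklist.front();
      worklist.pop();
      if (!done.insert(id).second) continue;  // Block already handled.
      for (int pred : blocks[id].predecessors) {
        if (blocks[pred].deferred) {
          worklist.push(pred);  // Still inside the deferred region; keep walking.
        } else {
          // Value arrives from non-deferred code, still in a register:
          // spill it once at the start of this deferred block.
          spill_here(id);
        }
      }
    }
  }

In the actual patch the target blocks are tracked in a per-range BitVector and
the spill is committed as a gap move at the block's first instruction, which
is where the code size saving over spilling at every register-to-slot switch
comes from.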

BUG=

Review URL: https://codereview.chromium.org/1551013002

Cr-Commit-Position: refs/heads/master@{#33413}
mtrofin 2016-01-20 08:31:15 -08:00 committed by Commit bot
parent a910cb4006
commit 7c54dc3385
3 changed files with 301 additions and 220 deletions


@@ -119,6 +119,7 @@ void LiveRangeSeparator::Splinter() {
void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
+const InstructionSequence *code = data()->code();
for (TopLevelLiveRange *top : data()->live_ranges()) {
if (top == nullptr || top->IsEmpty() || top->splinter() == nullptr ||
top->HasSpillOperand() || !top->splinter()->HasSpillRange()) {
@@ -132,7 +133,10 @@ void LiveRangeMerger::MarkRangesSpilledInDeferredBlocks() {
break;
}
}
-if (child == nullptr) top->MarkSpilledInDeferredBlock();
+if (child == nullptr) {
+top->TreatAsSpilledInDeferredBlock(data()->allocation_zone(),
+code->InstructionBlockCount());
+}
}
}


@@ -113,6 +113,165 @@ int GetByteWidth(MachineRepresentation rep) {
} // namespace
class LiveRangeBound {
public:
explicit LiveRangeBound(LiveRange* range, bool skip)
: range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
bool CanCover(LifetimePosition position) {
return start_ <= position && position < end_;
}
LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
struct FindResult {
LiveRange* cur_cover_;
LiveRange* pred_cover_;
};
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, TopLevelLiveRange* range) {
length_ = range->GetChildCount();
start_ = zone->NewArray<LiveRangeBound>(length_);
LiveRangeBound* curr = start_;
// Normally, spilled ranges do not need connecting moves, because the spill
// location has been assigned at definition. For ranges spilled in deferred
// blocks, that is not the case, so we need to connect the spilled children.
for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
new (curr) LiveRangeBound(i, i->spilled());
}
}
LiveRangeBound* Find(const LifetimePosition position) const {
size_t left_index = 0;
size_t right_index = length_;
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
left_index = current_index;
} else {
right_index = current_index;
}
}
}
LiveRangeBound* FindPred(const InstructionBlock* pred) {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
}
bool FindConnectableSubranges(const InstructionBlock* block,
const InstructionBlock* pred,
FindResult* result) const {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
if (bound->CanCover(cur_start)) {
// Both blocks are covered by the same range, so there is nothing to
// connect.
return false;
}
bound = Find(cur_start);
if (bound->skip_) {
return false;
}
result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
return (result->cur_cover_ != result->pred_cover_);
}
private:
size_t length_;
LiveRangeBound* start_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
: data_(data),
bounds_length_(static_cast<int>(data_->live_ranges().size())),
bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
zone_(zone) {
for (int i = 0; i < bounds_length_; ++i) {
new (&bounds_[i]) LiveRangeBoundArray();
}
}
LiveRangeBoundArray* ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
return array;
}
private:
const RegisterAllocationData* const data_;
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
struct DelayedInsertionMapCompare {
bool operator()(const DelayedInsertionMapKey& a,
const DelayedInsertionMapKey& b) const {
if (a.first == b.first) {
return a.second.Compare(b.second);
}
return a.first < b.first;
}
};
typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
DelayedInsertionMapCompare> DelayedInsertionMap;
UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
void* hint, UsePositionHintType hint_type)
@@ -734,51 +893,13 @@ void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
gap_index, operand, spill_move_insertion_locations_);
}
bool TopLevelLiveRange::TryCommitSpillInDeferredBlock(
InstructionSequence* code, const InstructionOperand& spill_operand) {
if (!IsSpilledOnlyInDeferredBlocks()) return false;
TRACE("Live Range %d will be spilled only in deferred blocks.\n", vreg());
// If we have ranges that aren't spilled but require the operand on the stack,
// make sure we insert the spill.
for (const LiveRange* child = this; child != nullptr; child = child->next()) {
if (!child->spilled() &&
child->NextSlotPosition(child->Start()) != nullptr) {
Instruction* instr =
code->InstructionAt(child->Start().ToInstructionIndex());
// Insert spill at the end to let live range connections happen at START.
ParallelMove* move =
instr->GetOrCreateParallelMove(Instruction::END, code->zone());
InstructionOperand assigned = child->GetAssignedOperand();
if (TopLevel()->has_slot_use()) {
bool found = false;
for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(assigned) &&
move_op->destination().Equals(spill_operand)) {
found = true;
break;
}
}
if (found) continue;
}
move->AddMove(assigned, spill_operand);
}
}
return true;
}
void TopLevelLiveRange::CommitSpillMoves(InstructionSequence* sequence,
const InstructionOperand& op,
bool might_be_duplicated) {
-DCHECK_IMPLIES(op.IsConstant(), spill_move_insertion_locations() == nullptr);
+DCHECK_IMPLIES(op.IsConstant(), GetSpillMoveInsertionLocations() == nullptr);
Zone* zone = sequence->zone();
-for (SpillMoveInsertionList* to_spill = spill_move_insertion_locations();
+for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations();
to_spill != nullptr; to_spill = to_spill->next) {
Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
ParallelMove* move =
@@ -2965,7 +3086,7 @@ void SpillSlotLocator::LocateSpillSlots() {
}
} else {
TopLevelLiveRange::SpillMoveInsertionList* spills =
-range->spill_move_insertion_locations();
+range->GetSpillMoveInsertionLocations();
DCHECK_NOT_NULL(spills);
for (; spills != nullptr; spills = spills->next) {
code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
@@ -3032,12 +3153,10 @@ void OperandAssigner::CommitAssignment() {
// connecting move when a successor child range is spilled - because the
// spilled range picks up its value from the slot which was assigned at
// definition. For ranges that are determined to spill only in deferred
-// blocks, we let ConnectLiveRanges and ResolveControlFlow insert such
-// moves between ranges. Because of how the ranges are split around
-// deferred blocks, this amounts to spilling and filling inside such
-// blocks.
-if (!top_range->TryCommitSpillInDeferredBlock(data()->code(),
-spill_operand)) {
+// blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
+// where a spill operand is expected, and then finalize by inserting the
+// spills in the deferred blocks dominators.
+if (!top_range->IsSpilledOnlyInDeferredBlocks()) {
// Spill at definition if the range isn't spilled only in deferred
// blocks.
top_range->CommitSpillMoves(
@@ -3188,171 +3307,6 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
}
namespace {
class LiveRangeBound {
public:
explicit LiveRangeBound(const LiveRange* range, bool skip)
: range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
bool CanCover(LifetimePosition position) {
return start_ <= position && position < end_;
}
const LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
struct FindResult {
const LiveRange* cur_cover_;
const LiveRange* pred_cover_;
};
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, const TopLevelLiveRange* const range) {
length_ = range->GetChildCount();
start_ = zone->NewArray<LiveRangeBound>(length_);
LiveRangeBound* curr = start_;
// Normally, spilled ranges do not need connecting moves, because the spill
// location has been assigned at definition. For ranges spilled in deferred
// blocks, that is not the case, so we need to connect the spilled children.
bool spilled_in_blocks = range->IsSpilledOnlyInDeferredBlocks();
for (const LiveRange *i = range; i != nullptr; i = i->next(), ++curr) {
new (curr) LiveRangeBound(i, !spilled_in_blocks && i->spilled());
}
}
LiveRangeBound* Find(const LifetimePosition position) const {
size_t left_index = 0;
size_t right_index = length_;
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
left_index = current_index;
} else {
right_index = current_index;
}
}
}
LiveRangeBound* FindPred(const InstructionBlock* pred) {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
}
bool FindConnectableSubranges(const InstructionBlock* block,
const InstructionBlock* pred,
FindResult* result) const {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
if (bound->CanCover(cur_start)) {
// Both blocks are covered by the same range, so there is nothing to
// connect.
return false;
}
bound = Find(cur_start);
if (bound->skip_) {
return false;
}
result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
return (result->cur_cover_ != result->pred_cover_);
}
private:
size_t length_;
LiveRangeBound* start_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const RegisterAllocationData* data, Zone* zone)
: data_(data),
bounds_length_(static_cast<int>(data_->live_ranges().size())),
bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
zone_(zone) {
for (int i = 0; i < bounds_length_; ++i) {
new (&bounds_[i]) LiveRangeBoundArray();
}
}
LiveRangeBoundArray* ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
return array;
}
private:
const RegisterAllocationData* const data_;
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
typedef std::pair<ParallelMove*, InstructionOperand> DelayedInsertionMapKey;
struct DelayedInsertionMapCompare {
bool operator()(const DelayedInsertionMapKey& a,
const DelayedInsertionMapKey& b) const {
if (a.first == b.first) {
return a.second.Compare(b.second);
}
return a.first < b.first;
}
};
typedef ZoneMap<DelayedInsertionMapKey, InstructionOperand,
DelayedInsertionMapCompare> DelayedInsertionMap;
} // namespace
LiveRangeConnector::LiveRangeConnector(RegisterAllocationData* data)
: data_(data) {}
@@ -3383,6 +3337,18 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
InstructionOperand pred_op = result.pred_cover_->GetAssignedOperand();
InstructionOperand cur_op = result.cur_cover_->GetAssignedOperand();
if (pred_op.Equals(cur_op)) continue;
if (!pred_op.IsAnyRegister() && cur_op.IsAnyRegister()) {
// We're doing a reload.
const LiveRange* current = result.cur_cover_;
if (current->TopLevel()->IsSpilledOnlyInDeferredBlocks() &&
pred_block->IsDeferred()) {
// The spill location should be defined in pred_block, so add
// pred_block to the list of blocks requiring a spill operand.
current->TopLevel()->GetListOfBlocksRequiringSpillOperands()->Add(
pred_block->rpo_number().ToInt());
}
}
int move_loc = ResolveControlFlow(block, cur_op, pred_block, pred_op);
USE(move_loc);
DCHECK_IMPLIES(
@@ -3393,6 +3359,16 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
iterator.Advance();
}
}
// At this stage, we collected blocks needing a spill operand from
// ConnectRanges and from ResolveControlFlow. Time to commit the spills for
// deferred blocks.
for (TopLevelLiveRange* top : data()->live_ranges()) {
if (top == nullptr || top->IsEmpty() ||
!top->IsSpilledOnlyInDeferredBlocks())
continue;
CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
}
}
@@ -3430,7 +3406,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
LifetimePosition pos = second_range->Start();
// Add gap move if the two live ranges touch and there is no block
// boundary.
-if (!connect_spilled && second_range->spilled()) continue;
+if (second_range->spilled()) continue;
if (first_range->End() != pos) continue;
if (data()->IsBlockBoundary(pos) &&
!CanEagerlyResolveControlFlow(GetInstructionBlock(code(), pos))) {
@@ -3442,6 +3418,16 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
bool delay_insertion = false;
Instruction::GapPosition gap_pos;
int gap_index = pos.ToInstructionIndex();
if (connect_spilled && !prev_operand.IsAnyRegister() &&
cur_operand.IsAnyRegister()) {
const InstructionBlock* block = code()->GetInstructionBlock(gap_index);
DCHECK(block->IsDeferred());
// Performing a reload in this block, meaning the spill operand must
// be defined here.
top_range->GetListOfBlocksRequiringSpillOperands()->Add(
block->rpo_number().ToInt());
}
if (pos.IsGapPosition()) {
gap_pos = pos.IsStart() ? Instruction::START : Instruction::END;
} else {
@@ -3452,7 +3438,7 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
gap_pos = delay_insertion ? Instruction::END : Instruction::START;
}
-// Fills or spills for spilled in deferred blocks ranges must happen
+// Reloads or spills for spilled in deferred blocks ranges must happen
// only in deferred blocks.
DCHECK_IMPLIES(
connect_spilled &&
@@ -3503,6 +3489,73 @@ void LiveRangeConnector::ConnectRanges(Zone* local_zone) {
}
void LiveRangeConnector::CommitSpillsInDeferredBlocks(
TopLevelLiveRange* range, LiveRangeBoundArray* array, Zone* temp_zone) {
DCHECK(range->IsSpilledOnlyInDeferredBlocks());
DCHECK(!range->spilled());
InstructionSequence* code = data()->code();
InstructionOperand spill_operand = range->GetSpillRangeOperand();
TRACE("Live Range %d will be spilled only in deferred blocks.\n",
range->vreg());
// If we have ranges that aren't spilled but require the operand on the stack,
// make sure we insert the spill.
for (const LiveRange* child = range; child != nullptr;
child = child->next()) {
for (const UsePosition* pos = child->first_pos(); pos != nullptr;
pos = pos->next()) {
if (pos->type() != UsePositionType::kRequiresSlot && !child->spilled())
continue;
range->AddBlockRequiringSpillOperand(
code->GetInstructionBlock(pos->pos().ToInstructionIndex())
->rpo_number());
}
}
ZoneQueue<int> worklist(temp_zone);
for (BitVector::Iterator iterator(
range->GetListOfBlocksRequiringSpillOperands());
!iterator.Done(); iterator.Advance()) {
worklist.push(iterator.Current());
}
// Seek the deferred blocks that dominate locations requiring spill operands,
// and spill there. We only need to spill at the start of such blocks.
BitVector done_blocks(
range->GetListOfBlocksRequiringSpillOperands()->length(), temp_zone);
while (!worklist.empty()) {
int block_id = worklist.front();
worklist.pop();
if (done_blocks.Contains(block_id)) continue;
done_blocks.Add(block_id);
const InstructionBlock* spill_block =
code->InstructionBlockAt(RpoNumber::FromInt(block_id));
for (const RpoNumber& pred : spill_block->predecessors()) {
const InstructionBlock* pred_block = code->InstructionBlockAt(pred);
if (pred_block->IsDeferred()) {
worklist.push(pred_block->rpo_number().ToInt());
} else {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred_block->last_instruction_index());
LiveRangeBound* bound = array->Find(pred_end);
InstructionOperand pred_op = bound->range_->GetAssignedOperand();
data()->AddGapMove(spill_block->first_instruction_index(),
Instruction::GapPosition::START, pred_op,
spill_operand);
}
}
}
}
} // namespace compiler
} // namespace internal
} // namespace v8


@@ -579,14 +579,17 @@ class TopLevelLiveRange final : public LiveRange {
// and instead let the LiveRangeConnector perform the spills within the
// deferred blocks. If so, we insert here spills for non-spilled ranges
// with slot use positions.
-void MarkSpilledInDeferredBlock() {
+void TreatAsSpilledInDeferredBlock(Zone* zone, int total_block_count) {
spill_start_index_ = -1;
spilled_in_deferred_blocks_ = true;
spill_move_insertion_locations_ = nullptr;
+list_of_blocks_requiring_spill_operands_ =
+new (zone) BitVector(total_block_count, zone);
}
-bool TryCommitSpillInDeferredBlock(InstructionSequence* code,
-const InstructionOperand& spill_operand);
+void CommitSpillInDeferredBlocks(RegisterAllocationData* data,
+const InstructionOperand& spill_operand,
+BitVector* necessary_spill_points);
TopLevelLiveRange* splintered_from() const { return splintered_from_; }
bool IsSplinter() const { return splintered_from_ != nullptr; }
@@ -617,7 +620,8 @@ class TopLevelLiveRange final : public LiveRange {
struct SpillMoveInsertionList;
-SpillMoveInsertionList* spill_move_insertion_locations() const {
+SpillMoveInsertionList* GetSpillMoveInsertionLocations() const {
+DCHECK(!IsSpilledOnlyInDeferredBlocks());
return spill_move_insertion_locations_;
}
TopLevelLiveRange* splinter() const { return splinter_; }
@@ -634,6 +638,16 @@ class TopLevelLiveRange final : public LiveRange {
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
void AddBlockRequiringSpillOperand(RpoNumber block_id) {
DCHECK(IsSpilledOnlyInDeferredBlocks());
GetListOfBlocksRequiringSpillOperands()->Add(block_id.ToInt());
}
BitVector* GetListOfBlocksRequiringSpillOperands() const {
DCHECK(IsSpilledOnlyInDeferredBlocks());
return list_of_blocks_requiring_spill_operands_;
}
private:
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
@@ -650,7 +664,12 @@ class TopLevelLiveRange final : public LiveRange {
InstructionOperand* spill_operand_;
SpillRange* spill_range_;
};
+union {
SpillMoveInsertionList* spill_move_insertion_locations_;
+BitVector* list_of_blocks_requiring_spill_operands_;
+};
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
bool spilled_in_deferred_blocks_;
@@ -1125,6 +1144,7 @@ class ReferenceMapPopulator final : public ZoneObject {
};
+class LiveRangeBoundArray;
// Insert moves of the form
//
// Operand(child_(k+1)) = Operand(child_k)
@@ -1157,6 +1177,10 @@ class LiveRangeConnector final : public ZoneObject {
const InstructionBlock* pred,
const InstructionOperand& pred_op);
void CommitSpillsInDeferredBlocks(TopLevelLiveRange* range,
LiveRangeBoundArray* array,
Zone* temp_zone);
RegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeConnector);