Reland "[regalloc] Place spill instructions optimally"

This is a reland of f4548e7598

Original change's description:
> [regalloc] Place spill instructions optimally
>
> Design doc:
> https://docs.google.com/document/d/1n9ADWnDI-sw0OvdSmrthf61prmDqbDmQq-NSrQw2MVI/edit?usp=sharing
>
> Most of this change follows directly what is discussed in the design
> document. A few other things are also changed:
>
> - PopulateReferenceMapsPhase is moved after ResolveControlFlowPhase so
>   that it can make use of the decision regarding whether a value is
>   spilled at its definition or later.
> - SpillSlotLocator is removed. It was already somewhat confusing,
>   because the responsibility for marking blocks as needing frames was
>   split: in some cases they were marked by SpillSlotLocator, and in
>   other cases they were marked by CommitSpillsInDeferredBlocks. With
>   this change, that split responsibility would become yet more
>   confusing if we kept SpillSlotLocator for the values that are spilled
>   at their definition, so I propose a simpler rule that whatever code
>   adds the spill move also marks the block.
> - A few class definitions (LiveRangeBound, FindResult,
>   LiveRangeBoundArray, and LiveRangeFinder) are moved without
>   modification from register-allocator.cc to register-allocator.h so
>   that we can refer to them from another cc file.
>
> Bug: v8:10606
> Change-Id: I374a3219a5de477a53bc48117e230287eae89e72
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2285390
> Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
> Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
> Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#69345}

Bug: v8:10606
Change-Id: I10fc1ef4b0bebb6c9f55ebdefe33e8c1e5646f0a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2352483
Commit-Queue: Seth Brenith <seth.brenith@microsoft.com>
Reviewed-by: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Thibaud Michaud <thibaudm@chromium.org>
Cr-Commit-Position: refs/heads/master@{#69427}
Author: Seth Brenith, 2020-08-12 13:12:37 -07:00 (committed by Commit Bot)
Parent: f5051f02d7
Commit: 2f80953131
8 changed files with 913 additions and 215 deletions

BUILD.gn

@@ -1942,6 +1942,8 @@ v8_compiler_sources = [
"src/compiler/backend/register-allocator-verifier.h",
"src/compiler/backend/register-allocator.cc",
"src/compiler/backend/register-allocator.h",
"src/compiler/backend/spill-placer.cc",
"src/compiler/backend/spill-placer.h",
"src/compiler/backend/unwinding-info-writer.h",
"src/compiler/basic-block-instrumentor.cc",
"src/compiler/basic-block-instrumentor.h",

src/compiler/backend/OWNERS

@@ -5,5 +5,6 @@ zhin@chromium.org
# Plus src/compiler owners.
per-file register-allocator*=thibaudm@chromium.org
per-file spill-placer*=thibaudm@chromium.org
# COMPONENT: Blink>JavaScript>Compiler

src/compiler/backend/register-allocator.cc

@@ -10,6 +10,7 @@
#include "src/base/small-vector.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/tick-counter.h"
#include "src/compiler/backend/spill-placer.h"
#include "src/compiler/linkage.h"
#include "src/strings/string-stream.h"
#include "src/utils/vector.h"
@@ -52,145 +53,94 @@ Instruction* GetLastInstruction(InstructionSequence* code,
} // namespace
class LiveRangeBound {
public:
explicit LiveRangeBound(LiveRange* range, bool skip)
: range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
void LiveRangeBoundArray::Initialize(Zone* zone, TopLevelLiveRange* range) {
size_t max_child_count = range->GetMaxChildCount();
start_ = zone->NewArray<LiveRangeBound>(max_child_count);
length_ = 0;
LiveRangeBound* curr = start_;
// The primary loop in ResolveControlFlow is not responsible for inserting
// connecting moves for spilled ranges.
for (LiveRange* i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
new (curr) LiveRangeBound(i, i->spilled());
}
}
bool CanCover(LifetimePosition position) {
return start_ <= position && position < end_;
}
LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
struct FindResult {
LiveRange* cur_cover_;
LiveRange* pred_cover_;
};
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, TopLevelLiveRange* range) {
size_t max_child_count = range->GetMaxChildCount();
start_ = zone->NewArray<LiveRangeBound>(max_child_count);
length_ = 0;
LiveRangeBound* curr = start_;
// Normally, spilled ranges do not need connecting moves, because the spill
// location has been assigned at definition. For ranges spilled in deferred
// blocks, that is not the case, so we need to connect the spilled children.
for (LiveRange *i = range; i != nullptr; i = i->next(), ++curr, ++length_) {
new (curr) LiveRangeBound(i, i->spilled());
LiveRangeBound* LiveRangeBoundArray::Find(
const LifetimePosition position) const {
size_t left_index = 0;
size_t right_index = length_;
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
left_index = current_index;
} else {
right_index = current_index;
}
}
}
LiveRangeBound* Find(const LifetimePosition position) const {
size_t left_index = 0;
size_t right_index = length_;
while (true) {
size_t current_index = left_index + (right_index - left_index) / 2;
DCHECK(right_index > current_index);
LiveRangeBound* bound = &start_[current_index];
if (bound->start_ <= position) {
if (position < bound->end_) return bound;
DCHECK(left_index < current_index);
left_index = current_index;
} else {
right_index = current_index;
}
}
LiveRangeBound* LiveRangeBoundArray::FindPred(const InstructionBlock* pred) {
LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
return Find(pred_end);
}
LiveRangeBound* LiveRangeBoundArray::FindSucc(const InstructionBlock* succ) {
LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
}
bool LiveRangeBoundArray::FindConnectableSubranges(
const InstructionBlock* block, const InstructionBlock* pred,
FindResult* result) const {
LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
if (bound->CanCover(cur_start)) {
// Both blocks are covered by the same range, so there is nothing to
// connect.
return false;
}
LiveRangeBound* FindPred(const InstructionBlock* pred) {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
return Find(pred_end);
bound = Find(cur_start);
if (bound->skip_) {
return false;
}
result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
return (result->cur_cover_ != result->pred_cover_);
}
LiveRangeBound* FindSucc(const InstructionBlock* succ) {
LifetimePosition succ_start = LifetimePosition::GapFromInstructionIndex(
succ->first_instruction_index());
return Find(succ_start);
LiveRangeFinder::LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone)
: data_(data),
bounds_length_(static_cast<int>(data_->live_ranges().size())),
bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
zone_(zone) {
for (int i = 0; i < bounds_length_; ++i) {
new (&bounds_[i]) LiveRangeBoundArray();
}
}
bool FindConnectableSubranges(const InstructionBlock* block,
const InstructionBlock* pred,
FindResult* result) const {
LifetimePosition pred_end =
LifetimePosition::InstructionFromInstructionIndex(
pred->last_instruction_index());
LiveRangeBound* bound = Find(pred_end);
result->pred_cover_ = bound->range_;
LifetimePosition cur_start = LifetimePosition::GapFromInstructionIndex(
block->first_instruction_index());
if (bound->CanCover(cur_start)) {
// Both blocks are covered by the same range, so there is nothing to
// connect.
return false;
}
bound = Find(cur_start);
if (bound->skip_) {
return false;
}
result->cur_cover_ = bound->range_;
DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
return (result->cur_cover_ != result->pred_cover_);
LiveRangeBoundArray* LiveRangeFinder::ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
private:
size_t length_;
LiveRangeBound* start_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone)
: data_(data),
bounds_length_(static_cast<int>(data_->live_ranges().size())),
bounds_(zone->NewArray<LiveRangeBoundArray>(bounds_length_)),
zone_(zone) {
for (int i = 0; i < bounds_length_; ++i) {
new (&bounds_[i]) LiveRangeBoundArray();
}
}
LiveRangeBoundArray* ArrayFor(int operand_index) {
DCHECK(operand_index < bounds_length_);
TopLevelLiveRange* range = data_->live_ranges()[operand_index];
DCHECK(range != nullptr && !range->IsEmpty());
LiveRangeBoundArray* array = &bounds_[operand_index];
if (array->ShouldInitialize()) {
array->Initialize(zone_, range);
}
return array;
}
private:
const TopTierRegisterAllocationData* const data_;
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
return array;
}
using DelayedInsertionMapKey = std::pair<ParallelMove*, InstructionOperand>;
@@ -862,7 +812,7 @@ struct TopLevelLiveRange::SpillMoveInsertionList : ZoneObject {
: gap_index(gap_index), operand(operand), next(next) {}
const int gap_index;
InstructionOperand* const operand;
SpillMoveInsertionList* const next;
SpillMoveInsertionList* next;
};
TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
@@ -873,11 +823,11 @@ TopLevelLiveRange::TopLevelLiveRange(int vreg, MachineRepresentation rep)
spill_operand_(nullptr),
spill_move_insertion_locations_(nullptr),
spilled_in_deferred_blocks_(false),
has_preassigned_slot_(false),
spill_start_index_(kMaxInt),
last_pos_(nullptr),
last_child_covers_(this),
splinter_(nullptr),
has_preassigned_slot_(false) {
splinter_(nullptr) {
bits_ |= SpillTypeField::encode(SpillType::kNoSpillType);
}
@@ -895,10 +845,14 @@ void TopLevelLiveRange::RecordSpillLocation(Zone* zone, int gap_index,
}
void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& op,
bool might_be_duplicated) {
const InstructionOperand& op) {
DCHECK_IMPLIES(op.IsConstant(),
GetSpillMoveInsertionLocations(data) == nullptr);
if (HasGeneralSpillRange()) {
SetLateSpillingSelected(false);
}
InstructionSequence* sequence = data->code();
Zone* zone = sequence->zone();
@@ -907,10 +861,27 @@ void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
ParallelMove* move =
instr->GetOrCreateParallelMove(Instruction::START, zone);
move->AddMove(*to_spill->operand, op);
instr->block()->mark_needs_frame();
}
}
void TopLevelLiveRange::FilterSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& op) {
DCHECK_IMPLIES(op.IsConstant(),
GetSpillMoveInsertionLocations(data) == nullptr);
bool might_be_duplicated = has_slot_use() || spilled();
InstructionSequence* sequence = data->code();
SpillMoveInsertionList* previous = nullptr;
for (SpillMoveInsertionList* to_spill = GetSpillMoveInsertionLocations(data);
to_spill != nullptr; previous = to_spill, to_spill = to_spill->next) {
Instruction* instr = sequence->InstructionAt(to_spill->gap_index);
ParallelMove* move = instr->GetParallelMove(Instruction::START);
// Skip insertion if it's possible that the move exists already as a
// constraint move from a fixed output register to a slot.
if (might_be_duplicated || has_preassigned_slot()) {
bool found = false;
bool found = false;
if (move != nullptr && (might_be_duplicated || has_preassigned_slot())) {
for (MoveOperands* move_op : *move) {
if (move_op->IsEliminated()) continue;
if (move_op->source().Equals(*to_spill->operand) &&
@@ -920,10 +891,17 @@ void TopLevelLiveRange::CommitSpillMoves(TopTierRegisterAllocationData* data,
break;
}
}
if (found) continue;
}
if (!has_preassigned_slot()) {
move->AddMove(*to_spill->operand, op);
if (found || has_preassigned_slot()) {
// Remove the item from the list.
if (previous == nullptr) {
spill_move_insertion_locations_ = to_spill->next;
} else {
previous->next = to_spill->next;
}
// Even though this location doesn't need a spill instruction, the
// block does require a frame.
instr->block()->mark_needs_frame();
}
}
}
@@ -1786,8 +1764,10 @@ void TopTierRegisterAllocationData::MarkAllocated(MachineRepresentation rep,
bool TopTierRegisterAllocationData::IsBlockBoundary(
LifetimePosition pos) const {
return pos.IsFullStart() &&
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
pos.ToInstructionIndex();
(static_cast<size_t>(pos.ToInstructionIndex()) ==
code()->instructions().size() ||
code()->GetInstructionBlock(pos.ToInstructionIndex())->code_start() ==
pos.ToInstructionIndex());
}
ConstraintBuilder::ConstraintBuilder(TopTierRegisterAllocationData* data)
@@ -4741,30 +4721,6 @@ void LinearScanAllocator::SpillBetweenUntil(LiveRange* range,
}
}
SpillSlotLocator::SpillSlotLocator(TopTierRegisterAllocationData* data)
: data_(data) {}
void SpillSlotLocator::LocateSpillSlots() {
const InstructionSequence* code = data()->code();
const size_t live_ranges_size = data()->live_ranges().size();
for (TopLevelLiveRange* range : data()->live_ranges()) {
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (range == nullptr || range->IsEmpty()) continue;
// We care only about ranges which spill in the frame.
if (!range->HasSpillRange() ||
range->IsSpilledOnlyInDeferredBlocks(data())) {
continue;
}
TopLevelLiveRange::SpillMoveInsertionList* spills =
range->GetSpillMoveInsertionLocations(data());
DCHECK_NOT_NULL(spills);
for (; spills != nullptr; spills = spills->next) {
code->GetInstructionBlock(spills->gap_index)->mark_needs_frame();
}
}
}
OperandAssigner::OperandAssigner(TopTierRegisterAllocationData* data)
: data_(data) {}
@@ -4866,12 +4822,12 @@ void OperandAssigner::CommitAssignment() {
// blocks, we let ConnectLiveRanges and ResolveControlFlow find the blocks
// where a spill operand is expected, and then finalize by inserting the
// spills in the deferred blocks dominators.
if (!top_range->IsSpilledOnlyInDeferredBlocks(data())) {
// Spill at definition if the range isn't spilled only in deferred
// blocks.
top_range->CommitSpillMoves(
data(), spill_operand,
top_range->has_slot_use() || top_range->spilled());
if (!top_range->IsSpilledOnlyInDeferredBlocks(data()) &&
!top_range->HasGeneralSpillRange()) {
// Spill at definition if the range isn't spilled in a way that will be
// handled later.
top_range->FilterSpillMoves(data(), spill_operand);
top_range->CommitSpillMoves(data(), spill_operand);
}
}
}
@@ -4991,7 +4947,8 @@ void ReferenceMapPopulator::PopulateReferenceMaps() {
// Check if the live range is spilled and the safe point is after
// the spill position.
int spill_index = range->IsSpilledOnlyInDeferredBlocks(data())
int spill_index = range->IsSpilledOnlyInDeferredBlocks(data()) ||
range->LateSpillingSelected()
? cur->Start().ToInstructionIndex()
: range->spill_start_index();
@@ -5058,7 +5015,10 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
LifetimePosition block_end =
LifetimePosition::GapFromInstructionIndex(block->code_end());
const LiveRange* current = result.cur_cover_;
// TODO(herhut): This is not the successor if we have control flow!
// Note that this is not the successor if we have control flow!
// However, in the following condition, we only refer to it if it
// begins in the current block, in which case we can safely declare it
// to be the successor.
const LiveRange* successor = current->next();
if (current->End() < block_end &&
(successor == nullptr || successor->spilled())) {
@@ -5099,17 +5059,22 @@ void LiveRangeConnector::ResolveControlFlow(Zone* local_zone) {
}
}
// At this stage, we collected blocks needing a spill operand from
// ConnectRanges and from ResolveControlFlow. Time to commit the spills for
// deferred blocks.
// At this stage, we collected blocks needing a spill operand due to reloads
// from ConnectRanges and from ResolveControlFlow. Time to commit the spills
// for deferred blocks. This is a convenient time to commit spills for general
// spill ranges also, because they need to use the LiveRangeFinder.
const size_t live_ranges_size = data()->live_ranges().size();
SpillPlacer spill_placer(&finder, data(), local_zone);
for (TopLevelLiveRange* top : data()->live_ranges()) {
CHECK_EQ(live_ranges_size,
data()->live_ranges().size()); // TODO(neis): crbug.com/831822
if (top == nullptr || top->IsEmpty() ||
!top->IsSpilledOnlyInDeferredBlocks(data()))
continue;
CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()), local_zone);
if (top == nullptr || top->IsEmpty()) continue;
if (top->IsSpilledOnlyInDeferredBlocks(data())) {
CommitSpillsInDeferredBlocks(top, finder.ArrayFor(top->vreg()),
local_zone);
} else if (top->HasGeneralSpillRange()) {
spill_placer.Add(top);
}
}
}
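
The loop at the end of ResolveControlFlow above is the only driver of SpillPlacer. A condensed sketch of that usage pattern, calling out the batching behavior (illustrative only, assuming the declarations from spill-placer.h below; not verbatim from the patch):

{
  SpillPlacer spill_placer(&finder, data(), local_zone);
  for (TopLevelLiveRange* top : data()->live_ranges()) {
    if (top == nullptr || top->IsEmpty()) continue;
    if (top->HasGeneralSpillRange()) spill_placer.Add(top);
  }
  // Leaving the scope runs ~SpillPlacer(), which commits any batch of ranges
  // that Add() has buffered but not yet flushed, so every spill move is in
  // the instruction stream before the phase returns.
}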

src/compiler/backend/register-allocator.h

@@ -706,7 +706,7 @@ class V8_EXPORT_PRIVATE LiveRange : public NON_EXPORTED_BASE(ZoneObject) {
using RepresentationField = base::BitField<MachineRepresentation, 13, 8>;
using RecombineField = base::BitField<bool, 21, 1>;
using ControlFlowRegisterHint = base::BitField<uint8_t, 22, 6>;
// Bits 28,29 are used by TopLevelLiveRange.
// Bits 28-31 are used by TopLevelLiveRange.
// Unique among children and splinters of the same virtual register.
int relative_id_;
@@ -814,6 +814,7 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
void set_is_phi(bool value) { bits_ = IsPhiField::update(bits_, value); }
bool is_non_loop_phi() const { return IsNonLoopPhiField::decode(bits_); }
bool is_loop_phi() const { return is_phi() && !is_non_loop_phi(); }
void set_is_non_loop_phi(bool value) {
bits_ = IsNonLoopPhiField::update(bits_, value);
}
@@ -865,10 +866,12 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// Spill range management.
void SetSpillRange(SpillRange* spill_range);
// Encodes whether a range is also available from a memory localtion:
// Encodes whether a range is also available from a memory location:
// kNoSpillType: not availble in memory location.
// kSpillOperand: computed in a memory location at range start.
// kSpillRange: copied (spilled) to memory location at range start.
// kSpillRange: copied (spilled) to memory location at the definition,
// or at the beginning of some later blocks if
// LateSpillingSelected() is true.
// kDeferredSpillRange: copied (spilled) to memory location at entry
// to deferred blocks that have a use from memory.
//
@@ -919,9 +922,13 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
spill_start_index_ = Min(start, spill_start_index_);
}
// Omits any moves from spill_move_insertion_locations_ that can be skipped.
void FilterSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& operand);
// Writes all moves from spill_move_insertion_locations_ to the schedule.
void CommitSpillMoves(TopTierRegisterAllocationData* data,
const InstructionOperand& operand,
bool might_be_duplicated);
const InstructionOperand& operand);
// If all the children of this range are spilled in deferred blocks, and if
// for any non-spilled child with a use position requiring a slot, that range
@@ -1015,6 +1022,26 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
void MarkHasPreassignedSlot() { has_preassigned_slot_ = true; }
bool has_preassigned_slot() const { return has_preassigned_slot_; }
// Late spilling refers to spilling at places after the definition. These
// spills are guaranteed to cover at least all of the sub-ranges where the
// register allocator chose to evict the value from a register.
void SetLateSpillingSelected(bool late_spilling_selected) {
DCHECK(spill_type() == SpillType::kSpillRange);
SpillRangeMode new_mode = late_spilling_selected
? SpillRangeMode::kSpillLater
: SpillRangeMode::kSpillAtDefinition;
// A single TopLevelLiveRange should never be used in both modes.
DCHECK(SpillRangeModeField::decode(bits_) == SpillRangeMode::kNotSet ||
SpillRangeModeField::decode(bits_) == new_mode);
bits_ = SpillRangeModeField::update(bits_, new_mode);
}
bool LateSpillingSelected() const {
// Nobody should be reading this value until it's been decided.
DCHECK_IMPLIES(HasGeneralSpillRange(), SpillRangeModeField::decode(bits_) !=
SpillRangeMode::kNotSet);
return SpillRangeModeField::decode(bits_) == SpillRangeMode::kSpillLater;
}
void AddBlockRequiringSpillOperand(
RpoNumber block_id, const TopTierRegisterAllocationData* data) {
DCHECK(IsSpilledOnlyInDeferredBlocks(data));
@@ -1031,12 +1058,21 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
friend class LiveRange;
void SetSplinteredFrom(TopLevelLiveRange* splinter_parent);
// If spill type is kSpillRange, then this value indicates whether we've
// chosen to spill at the definition or at some later points.
enum class SpillRangeMode : uint8_t {
kNotSet,
kSpillAtDefinition,
kSpillLater,
};
using HasSlotUseField = base::BitField<SlotUseKind, 1, 2>;
using IsPhiField = base::BitField<bool, 3, 1>;
using IsNonLoopPhiField = base::BitField<bool, 4, 1>;
using SpillTypeField = base::BitField<SpillType, 5, 2>;
using DeferredFixedField = base::BitField<bool, 28, 1>;
using SpillAtLoopHeaderNotBeneficialField = base::BitField<bool, 29, 1>;
using SpillRangeModeField = base::BitField<SpillRangeMode, 30, 2>;
int vreg_;
int last_child_id_;
@@ -1055,11 +1091,12 @@ class V8_EXPORT_PRIVATE TopLevelLiveRange final : public LiveRange {
// TODO(mtrofin): generalize spilling after definition, currently specialized
// just for spill in a single deferred block.
bool spilled_in_deferred_blocks_;
bool has_preassigned_slot_;
int spill_start_index_;
UsePosition* last_pos_;
LiveRange* last_child_covers_;
TopLevelLiveRange* splinter_;
bool has_preassigned_slot_;
DISALLOW_COPY_AND_ASSIGN(TopLevelLiveRange);
};
@@ -1114,6 +1151,65 @@ class SpillRange final : public ZoneObject {
DISALLOW_COPY_AND_ASSIGN(SpillRange);
};
class LiveRangeBound {
public:
explicit LiveRangeBound(LiveRange* range, bool skip)
: range_(range), start_(range->Start()), end_(range->End()), skip_(skip) {
DCHECK(!range->IsEmpty());
}
bool CanCover(LifetimePosition position) {
return start_ <= position && position < end_;
}
LiveRange* const range_;
const LifetimePosition start_;
const LifetimePosition end_;
const bool skip_;
private:
DISALLOW_COPY_AND_ASSIGN(LiveRangeBound);
};
struct FindResult {
LiveRange* cur_cover_;
LiveRange* pred_cover_;
};
class LiveRangeBoundArray {
public:
LiveRangeBoundArray() : length_(0), start_(nullptr) {}
bool ShouldInitialize() { return start_ == nullptr; }
void Initialize(Zone* zone, TopLevelLiveRange* range);
LiveRangeBound* Find(const LifetimePosition position) const;
LiveRangeBound* FindPred(const InstructionBlock* pred);
LiveRangeBound* FindSucc(const InstructionBlock* succ);
bool FindConnectableSubranges(const InstructionBlock* block,
const InstructionBlock* pred,
FindResult* result) const;
private:
size_t length_;
LiveRangeBound* start_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeBoundArray);
};
class LiveRangeFinder {
public:
explicit LiveRangeFinder(const TopTierRegisterAllocationData* data,
Zone* zone);
LiveRangeBoundArray* ArrayFor(int operand_index);
private:
const TopTierRegisterAllocationData* const data_;
const int bounds_length_;
LiveRangeBoundArray* const bounds_;
Zone* const zone_;
DISALLOW_COPY_AND_ASSIGN(LiveRangeFinder);
};
class ConstraintBuilder final : public ZoneObject {
public:
explicit ConstraintBuilder(TopTierRegisterAllocationData* data);
@@ -1469,20 +1565,6 @@ class LinearScanAllocator final : public RegisterAllocator {
DISALLOW_COPY_AND_ASSIGN(LinearScanAllocator);
};
class SpillSlotLocator final : public ZoneObject {
public:
explicit SpillSlotLocator(TopTierRegisterAllocationData* data);
void LocateSpillSlots();
private:
TopTierRegisterAllocationData* data() const { return data_; }
TopTierRegisterAllocationData* const data_;
DISALLOW_COPY_AND_ASSIGN(SpillSlotLocator);
};
class OperandAssigner final : public ZoneObject {
public:
explicit OperandAssigner(TopTierRegisterAllocationData* data);
@@ -1508,7 +1590,7 @@ class ReferenceMapPopulator final : public ZoneObject {
public:
explicit ReferenceMapPopulator(TopTierRegisterAllocationData* data);
// Phase 8: compute values for pointer maps.
// Phase 10: compute values for pointer maps.
void PopulateReferenceMaps();
private:
@@ -1533,13 +1615,14 @@ class LiveRangeConnector final : public ZoneObject {
public:
explicit LiveRangeConnector(TopTierRegisterAllocationData* data);
// Phase 9: reconnect split ranges with moves, when the control flow
// Phase 8: reconnect split ranges with moves, when the control flow
// between the ranges is trivial (no branches).
void ConnectRanges(Zone* local_zone);
// Phase 10: insert moves to connect ranges across basic blocks, when the
// Phase 9: insert moves to connect ranges across basic blocks, when the
// control flow between them cannot be trivially resolved, such as joining
// branches.
// branches. Also determines whether to spill at the definition or later, and
// adds spill moves to the gaps in the schedule.
void ResolveControlFlow(Zone* local_zone);
private:

src/compiler/backend/spill-placer.cc (new file)

@@ -0,0 +1,484 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/backend/spill-placer.h"
#include "src/base/bits-iterator.h"
#include "src/compiler/backend/register-allocator.h"
namespace v8 {
namespace internal {
namespace compiler {
SpillPlacer::SpillPlacer(LiveRangeFinder* finder,
TopTierRegisterAllocationData* data, Zone* zone)
: finder_(finder), data_(data), zone_(zone) {}
SpillPlacer::~SpillPlacer() {
if (assigned_indices_ > 0) {
CommitSpills();
}
}
void SpillPlacer::Add(TopLevelLiveRange* range) {
DCHECK(range->HasGeneralSpillRange());
InstructionOperand spill_operand = range->GetSpillRangeOperand();
range->FilterSpillMoves(data(), spill_operand);
InstructionSequence* code = data_->code();
InstructionBlock* top_start_block =
code->GetInstructionBlock(range->Start().ToInstructionIndex());
RpoNumber top_start_block_number = top_start_block->rpo_number();
// Check for several cases where spilling at the definition is best.
// - The value is already moved on-stack somehow so the list of insertion
// locations for spilling at the definition is empty.
// - If the first LiveRange is spilled, then there's no sense in doing
// anything other than spilling at the definition.
// - If the value is defined in a deferred block, then the logic to select
// the earliest deferred block as the insertion point would cause
// incorrect behavior, so the value must be spilled at the definition.
// - We haven't seen any indication of performance improvements from seeking
// optimal spilling positions except on loop-top phi values, so spill
// any value that isn't a loop-top phi at the definition to avoid
// increasing the code size for no benefit.
if (range->GetSpillMoveInsertionLocations(data()) == nullptr ||
range->spilled() || top_start_block->IsDeferred() ||
(!FLAG_stress_turbo_late_spilling && !range->is_loop_phi())) {
range->CommitSpillMoves(data(), spill_operand);
return;
}
// Iterate through the range and mark every block that needs the value to be
// spilled.
for (const LiveRange* child = range; child != nullptr;
child = child->next()) {
if (child->spilled()) {
// Add every block that contains part of this live range.
for (UseInterval* interval = child->first_interval(); interval != nullptr;
interval = interval->next()) {
RpoNumber start_block =
code->GetInstructionBlock(interval->start().ToInstructionIndex())
->rpo_number();
if (start_block == top_start_block_number) {
// Can't do late spilling if the first spill is within the
// definition block.
range->CommitSpillMoves(data(), spill_operand);
// Verify that we never added any data for this range to the table.
DCHECK(!IsLatestVreg(range->vreg()));
return;
}
LifetimePosition end = interval->end();
int end_instruction = end.ToInstructionIndex();
// The end position is exclusive, so an end position exactly on a block
// boundary indicates that the range applies only to the prior block.
if (data()->IsBlockBoundary(end)) {
--end_instruction;
}
RpoNumber end_block =
code->GetInstructionBlock(end_instruction)->rpo_number();
while (start_block <= end_block) {
SetSpillRequired(code->InstructionBlockAt(start_block), range->vreg(),
top_start_block_number);
start_block = start_block.Next();
}
}
} else {
// Add every block that contains a use which requires the on-stack value.
for (const UsePosition* pos = child->first_pos(); pos != nullptr;
pos = pos->next()) {
if (pos->type() != UsePositionType::kRequiresSlot) continue;
InstructionBlock* block =
code->GetInstructionBlock(pos->pos().ToInstructionIndex());
RpoNumber block_number = block->rpo_number();
if (block_number == top_start_block_number) {
// Can't do late spilling if the first spill is within the
// definition block.
range->CommitSpillMoves(data(), spill_operand);
// Verify that we never added any data for this range to the table.
DCHECK(!IsLatestVreg(range->vreg()));
return;
}
SetSpillRequired(block, range->vreg(), top_start_block_number);
}
}
}
// If we haven't yet marked anything for this range, then it never needs to
// spill at all.
if (!IsLatestVreg(range->vreg())) {
range->SetLateSpillingSelected(true);
return;
}
SetDefinition(top_start_block_number, range->vreg());
}
class SpillPlacer::Entry {
public:
// Functions operating on single values (during setup):
void SetSpillRequiredSingleValue(int value_index) {
DCHECK_LT(value_index, kValueIndicesPerEntry);
uint64_t bit = uint64_t{1} << value_index;
SetSpillRequired(bit);
}
void SetDefinitionSingleValue(int value_index) {
DCHECK_LT(value_index, kValueIndicesPerEntry);
uint64_t bit = uint64_t{1} << value_index;
SetDefinition(bit);
}
// Functions operating on all values simultaneously, as bitfields:
uint64_t SpillRequired() const { return GetValuesInState<kSpillRequired>(); }
void SetSpillRequired(uint64_t mask) {
UpdateValuesToState<kSpillRequired>(mask);
}
uint64_t SpillRequiredInNonDeferredSuccessor() const {
return GetValuesInState<kSpillRequiredInNonDeferredSuccessor>();
}
void SetSpillRequiredInNonDeferredSuccessor(uint64_t mask) {
UpdateValuesToState<kSpillRequiredInNonDeferredSuccessor>(mask);
}
uint64_t SpillRequiredInDeferredSuccessor() const {
return GetValuesInState<kSpillRequiredInDeferredSuccessor>();
}
void SetSpillRequiredInDeferredSuccessor(uint64_t mask) {
UpdateValuesToState<kSpillRequiredInDeferredSuccessor>(mask);
}
uint64_t Definition() const { return GetValuesInState<kDefinition>(); }
void SetDefinition(uint64_t mask) { UpdateValuesToState<kDefinition>(mask); }
private:
// Possible states for every value, at every block.
enum State {
// This block is not (yet) known to require the on-stack value.
kUnmarked,
// The value must be on the stack in this block.
kSpillRequired,
// The value doesn't need to be on-stack in this block, but some
// non-deferred successor needs it.
kSpillRequiredInNonDeferredSuccessor,
// The value doesn't need to be on-stack in this block, but some
// deferred successor needs it.
kSpillRequiredInDeferredSuccessor,
// The value is defined in this block.
kDefinition,
};
template <State state>
uint64_t GetValuesInState() const {
STATIC_ASSERT(state < 8);
return ((state & 1) ? first_bit_ : ~first_bit_) &
((state & 2) ? second_bit_ : ~second_bit_) &
((state & 4) ? third_bit_ : ~third_bit_);
}
template <State state>
void UpdateValuesToState(uint64_t mask) {
STATIC_ASSERT(state < 8);
first_bit_ =
Entry::UpdateBitDataWithMask<(state & 1) != 0>(first_bit_, mask);
second_bit_ =
Entry::UpdateBitDataWithMask<(state & 2) != 0>(second_bit_, mask);
third_bit_ =
Entry::UpdateBitDataWithMask<(state & 4) != 0>(third_bit_, mask);
}
template <bool set_ones>
static uint64_t UpdateBitDataWithMask(uint64_t data, uint64_t mask) {
return set_ones ? data | mask : data & ~mask;
}
// Storage for the states of up to 64 live ranges.
uint64_t first_bit_ = 0;
uint64_t second_bit_ = 0;
uint64_t third_bit_ = 0;
};
int SpillPlacer::GetOrCreateIndexForLatestVreg(int vreg) {
DCHECK_LE(assigned_indices_, kValueIndicesPerEntry);
// If this vreg isn't yet the last one in the list, then add it.
if (!IsLatestVreg(vreg)) {
if (vreg_numbers_ == nullptr) {
DCHECK_EQ(assigned_indices_, 0);
DCHECK_EQ(entries_, nullptr);
// We lazily allocate these arrays because many functions don't have any
// values that use SpillPlacer.
entries_ =
zone_->NewArray<Entry>(data()->code()->instruction_blocks().size());
for (size_t i = 0; i < data()->code()->instruction_blocks().size(); ++i) {
new (&entries_[i]) Entry();
}
vreg_numbers_ = zone_->NewArray<int>(kValueIndicesPerEntry);
}
if (assigned_indices_ == kValueIndicesPerEntry) {
// The table is full; commit the current set of values and clear it.
CommitSpills();
ClearData();
}
vreg_numbers_[assigned_indices_] = vreg;
++assigned_indices_;
}
return assigned_indices_ - 1;
}
void SpillPlacer::CommitSpills() {
FirstBackwardPass();
ForwardPass();
SecondBackwardPass();
}
void SpillPlacer::ClearData() {
assigned_indices_ = 0;
for (int i = 0; i < data()->code()->InstructionBlockCount(); ++i) {
new (&entries_[i]) Entry();
}
first_block_ = RpoNumber::Invalid();
last_block_ = RpoNumber::Invalid();
}
void SpillPlacer::ExpandBoundsToInclude(RpoNumber block) {
if (!first_block_.IsValid()) {
DCHECK(!last_block_.IsValid());
first_block_ = block;
last_block_ = block;
} else {
if (first_block_ > block) {
first_block_ = block;
}
if (last_block_ < block) {
last_block_ = block;
}
}
}
void SpillPlacer::SetSpillRequired(InstructionBlock* block, int vreg,
RpoNumber top_start_block) {
// Spilling in loops is bad, so if the block is non-deferred and nested
// within a loop, and the definition is before that loop, then mark the loop
// top instead. Of course we must find the outermost such loop.
if (!block->IsDeferred()) {
while (block->loop_header().IsValid() &&
block->loop_header() > top_start_block) {
block = data()->code()->InstructionBlockAt(block->loop_header());
}
}
int value_index = GetOrCreateIndexForLatestVreg(vreg);
entries_[block->rpo_number().ToSize()].SetSpillRequiredSingleValue(
value_index);
ExpandBoundsToInclude(block->rpo_number());
}
void SpillPlacer::SetDefinition(RpoNumber block, int vreg) {
int value_index = GetOrCreateIndexForLatestVreg(vreg);
entries_[block.ToSize()].SetDefinitionSingleValue(value_index);
ExpandBoundsToInclude(block);
}
void SpillPlacer::FirstBackwardPass() {
InstructionSequence* code = data()->code();
for (int i = last_block_.ToInt(); i >= first_block_.ToInt(); --i) {
RpoNumber block_id = RpoNumber::FromInt(i);
InstructionBlock* block = code->instruction_blocks()[i];
Entry& entry = entries_[i];
// State that will be accumulated from successors.
uint64_t spill_required_in_non_deferred_successor = 0;
uint64_t spill_required_in_deferred_successor = 0;
for (RpoNumber successor_id : block->successors()) {
// Ignore loop back-edges.
if (successor_id <= block_id) continue;
InstructionBlock* successor = code->InstructionBlockAt(successor_id);
const Entry& successor_entry = entries_[successor_id.ToSize()];
if (successor->IsDeferred()) {
spill_required_in_deferred_successor |= successor_entry.SpillRequired();
} else {
spill_required_in_non_deferred_successor |=
successor_entry.SpillRequired();
}
spill_required_in_deferred_successor |=
successor_entry.SpillRequiredInDeferredSuccessor();
spill_required_in_non_deferred_successor |=
successor_entry.SpillRequiredInNonDeferredSuccessor();
}
// Starting state of the current block.
uint64_t defs = entry.Definition();
uint64_t needs_spill = entry.SpillRequired();
// Info about successors doesn't get to override existing info about
// definitions and spills required by this block itself.
spill_required_in_deferred_successor &= ~(defs | needs_spill);
spill_required_in_non_deferred_successor &= ~(defs | needs_spill);
entry.SetSpillRequiredInDeferredSuccessor(
spill_required_in_deferred_successor);
entry.SetSpillRequiredInNonDeferredSuccessor(
spill_required_in_non_deferred_successor);
}
}
void SpillPlacer::ForwardPass() {
InstructionSequence* code = data()->code();
for (int i = first_block_.ToInt(); i <= last_block_.ToInt(); ++i) {
RpoNumber block_id = RpoNumber::FromInt(i);
InstructionBlock* block = code->instruction_blocks()[i];
// Deferred blocks don't need to participate in the forward pass, because
// their spills all get pulled forward to the earliest possible deferred
// block (where a non-deferred block jumps to a deferred block), and
// decisions about spill requirements for non-deferred blocks don't take
// deferred blocks into account.
if (block->IsDeferred()) continue;
Entry& entry = entries_[i];
// State that will be accumulated from predecessors.
uint64_t spill_required_in_non_deferred_predecessor = 0;
uint64_t spill_required_in_all_non_deferred_predecessors =
static_cast<uint64_t>(int64_t{-1});
for (RpoNumber predecessor_id : block->predecessors()) {
// Ignore loop back-edges.
if (predecessor_id >= block_id) continue;
InstructionBlock* predecessor = code->InstructionBlockAt(predecessor_id);
if (predecessor->IsDeferred()) continue;
const Entry& predecessor_entry = entries_[predecessor_id.ToSize()];
spill_required_in_non_deferred_predecessor |=
predecessor_entry.SpillRequired();
spill_required_in_all_non_deferred_predecessors &=
predecessor_entry.SpillRequired();
}
// Starting state of the current block.
uint64_t spill_required_in_non_deferred_successor =
entry.SpillRequiredInNonDeferredSuccessor();
uint64_t spill_required_in_any_successor =
spill_required_in_non_deferred_successor |
entry.SpillRequiredInDeferredSuccessor();
// If all of the predecessors agree that a spill is required, then a
// spill is required. Note that we don't set anything for values that
// currently have no markings in this block, to avoid pushing data too
// far down the graph and confusing the next backward pass.
entry.SetSpillRequired(spill_required_in_any_successor &
spill_required_in_non_deferred_predecessor &
spill_required_in_all_non_deferred_predecessors);
// If only some of the predecessors require a spill, but some successor
// of this block also requires a spill, then this merge point requires a
// spill. This ensures that no control-flow path through non-deferred
// blocks ever has to spill twice.
entry.SetSpillRequired(spill_required_in_non_deferred_successor &
spill_required_in_non_deferred_predecessor);
}
}
void SpillPlacer::SecondBackwardPass() {
InstructionSequence* code = data()->code();
for (int i = last_block_.ToInt(); i >= first_block_.ToInt(); --i) {
RpoNumber block_id = RpoNumber::FromInt(i);
InstructionBlock* block = code->instruction_blocks()[i];
Entry& entry = entries_[i];
// State that will be accumulated from successors.
uint64_t spill_required_in_non_deferred_successor = 0;
uint64_t spill_required_in_deferred_successor = 0;
uint64_t spill_required_in_all_non_deferred_successors =
static_cast<uint64_t>(int64_t{-1});
for (RpoNumber successor_id : block->successors()) {
// Ignore loop back-edges.
if (successor_id <= block_id) continue;
InstructionBlock* successor = code->InstructionBlockAt(successor_id);
const Entry& successor_entry = entries_[successor_id.ToSize()];
if (successor->IsDeferred()) {
spill_required_in_deferred_successor |= successor_entry.SpillRequired();
} else {
spill_required_in_non_deferred_successor |=
successor_entry.SpillRequired();
spill_required_in_all_non_deferred_successors &=
successor_entry.SpillRequired();
}
}
// Starting state of the current block.
uint64_t defs = entry.Definition();
// If all of the successors of a definition need the value to be
// spilled, then the value should be spilled at the definition.
uint64_t spill_at_def = defs & spill_required_in_non_deferred_successor &
spill_required_in_all_non_deferred_successors;
for (int index_to_spill : base::bits::IterateBits(spill_at_def)) {
int vreg_to_spill = vreg_numbers_[index_to_spill];
TopLevelLiveRange* top = data()->live_ranges()[vreg_to_spill];
top->CommitSpillMoves(data(), top->GetSpillRangeOperand());
}
if (block->IsDeferred()) {
DCHECK_EQ(defs, 0);
// Any deferred successor needing a spill is sufficient to make the
// current block need a spill.
entry.SetSpillRequired(spill_required_in_deferred_successor);
}
// Propagate data upward if there are non-deferred successors and they
// all need a spill, regardless of whether the current block is
// deferred.
entry.SetSpillRequired(~defs & spill_required_in_non_deferred_successor &
spill_required_in_all_non_deferred_successors);
// Iterate the successors again to find out which ones require spills at
// their beginnings, and insert those spills.
for (RpoNumber successor_id : block->successors()) {
// Ignore loop back-edges.
if (successor_id <= block_id) continue;
InstructionBlock* successor = code->InstructionBlockAt(successor_id);
const Entry& successor_entry = entries_[successor_id.ToSize()];
for (int index_to_spill :
base::bits::IterateBits(successor_entry.SpillRequired() &
~entry.SpillRequired() & ~spill_at_def)) {
CommitSpill(vreg_numbers_[index_to_spill], block, successor);
}
}
}
}
void SpillPlacer::CommitSpill(int vreg, InstructionBlock* predecessor,
InstructionBlock* successor) {
TopLevelLiveRange* top = data()->live_ranges()[vreg];
LiveRangeBoundArray* array = finder_->ArrayFor(vreg);
LifetimePosition pred_end = LifetimePosition::InstructionFromInstructionIndex(
predecessor->last_instruction_index());
LiveRangeBound* bound = array->Find(pred_end);
InstructionOperand pred_op = bound->range_->GetAssignedOperand();
DCHECK(pred_op.IsAnyRegister());
DCHECK_EQ(successor->PredecessorCount(), 1);
data()->AddGapMove(successor->first_instruction_index(),
Instruction::GapPosition::START, pred_op,
top->GetSpillRangeOperand());
successor->mark_needs_frame();
top->SetLateSpillingSelected(true);
}
} // namespace compiler
} // namespace internal
} // namespace v8
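
The Entry class above packs a 3-bit state for up to 64 values into three parallel 64-bit words, so each dataflow step is a handful of bitwise operations over all values at once. A self-contained sketch of that encoding (hypothetical names, not V8 code):

#include <cstdint>
#include <cstdio>

struct BitPlaneStates {
  // Bit i of word bk holds bit k of value i's 3-bit state.
  uint64_t b0 = 0, b1 = 0, b2 = 0;

  // Returns a mask of all values currently in `state` (0..7).
  uint64_t InState(unsigned state) const {
    return ((state & 1) ? b0 : ~b0) &
           ((state & 2) ? b1 : ~b1) &
           ((state & 4) ? b2 : ~b2);
  }

  // Moves every value selected by `mask` into `state`.
  void SetState(uint64_t mask, unsigned state) {
    b0 = (state & 1) ? (b0 | mask) : (b0 & ~mask);
    b1 = (state & 2) ? (b1 | mask) : (b1 & ~mask);
    b2 = (state & 4) ? (b2 | mask) : (b2 & ~mask);
  }
};

int main() {
  enum : unsigned { kUnmarked = 0, kSpillRequired = 1, kDefinition = 4 };
  BitPlaneStates block;
  block.SetState(uint64_t{1} << 3, kDefinition);     // value 3 is defined here.
  block.SetState(uint64_t{1} << 7, kSpillRequired);  // value 7 must be on-stack.
  // Prints 0000000000000080: only value 7 is in the kSpillRequired state.
  std::printf("%016llx\n",
              static_cast<unsigned long long>(block.InState(kSpillRequired)));
  return 0;
}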

src/compiler/backend/spill-placer.h (new file)

@@ -0,0 +1,169 @@
// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_COMPILER_BACKEND_SPILL_PLACER_H_
#define V8_COMPILER_BACKEND_SPILL_PLACER_H_
#include "src/compiler/backend/instruction.h"
namespace v8 {
namespace internal {
namespace compiler {
class LiveRangeFinder;
class TopLevelLiveRange;
class TopTierRegisterAllocationData;
// SpillPlacer is an implementation of an algorithm to find optimal spill
// insertion positions, where optimal is defined as:
//
// 1. Spills needed by deferred code don't affect non-deferred code.
// 2. No control-flow path spills the same value more than once in non-deferred
// blocks.
// 3. Where possible based on #2, control-flow paths through non-deferred code
// that don't need the value to be on the stack don't execute any spills.
// 4. The fewest number of spill instructions is written to meet these rules.
// 5. Spill instructions are placed as early as possible.
//
// These rules are an attempt to make code paths that don't need to spill faster
// while not increasing code size too much.
//
// Considering just one value at a time for now, the steps are:
//
// 1. If the value is defined in a deferred block, or needs its value to be on
// the stack during the definition block, emit a move right after the
// definition and exit.
// 2. Build an array representing the state at each block, where the state can
// be any of the following:
// - unmarked (default/initial state)
// - definition
// - spill required
// - spill required in non-deferred successor
// - spill required in deferred successor
// 3. Mark the block containing the definition.
// 4. Mark as "spill required" all blocks that contain any part of a spilled
// LiveRange, or any use that requires the value to be on the stack.
// 5. Walk the block list backward, setting the "spill required in successor"
// values where appropriate. If both deferred and non-deferred successors
// require a spill, then the result should be "spill required in non-deferred
// successor".
// 6. Walk the block list forward, updating marked blocks to "spill required" if
// all of their predecessors agree that a spill is required. Furthermore, if
// a block is marked as "spill required in non-deferred successor" and any
// non-deferred predecessor is marked as "spill required", then the current
// block is updated to "spill required". We must mark these merge points as
// "spill required" to obey rule #2 above: if we didn't, then there would
// exist a control-flow path through two different spilled regions.
// 7. Walk the block list backward again, updating blocks to "spill required" if
// all of their successors agree that a spill is required, or if the current
// block is deferred and any of its successors require spills. If only some
// successors of a non-deferred block require spills, then insert spill moves
// at the beginning of those successors. If we manage to smear the "spill
// required" value all the way to the definition block, then insert a spill
// move at the definition instead. (Spilling at the definition implies that
// we didn't emit any other spill moves, and there is a DCHECK mechanism to
// ensure that invariant.)
//
// Loop back-edges can be safely ignored in every step. Anything that the loop
// header needs on-stack will be spilled either in the loop header itself or
// sometime before entering the loop, so its back-edge predecessors don't need
// to contain any data about the loop header.
//
// The operations described in those steps are simple Boolean logic, so we can
// easily process a batch of values at the same time as an optimization.
class SpillPlacer {
public:
SpillPlacer(LiveRangeFinder* finder, TopTierRegisterAllocationData* data,
Zone* zone);
~SpillPlacer();
// Adds the given TopLevelLiveRange to the SpillPlacer's state. Will
// eventually commit spill moves for that range and mark the range to indicate
// whether its value is spilled at the definition or some later point, so that
// subsequent phases can know whether to assume the value is always on-stack.
// However, those steps may happen during a later call to Add or during the
// destructor.
void Add(TopLevelLiveRange* range);
private:
TopTierRegisterAllocationData* data() const { return data_; }
// While initializing data for a range, returns the index within each Entry
// where data about that range should be stored. May cause data about previous
// ranges to be committed to make room if the table is full.
int GetOrCreateIndexForLatestVreg(int vreg);
bool IsLatestVreg(int vreg) const {
return assigned_indices_ > 0 &&
vreg_numbers_[assigned_indices_ - 1] == vreg;
}
// Processes all of the ranges which have been added, inserts spill moves for
// them to the instruction sequence, and marks the ranges with whether they
// are spilled at the definition or later.
void CommitSpills();
void ClearData();
// Updates the iteration bounds first_block_ and last_block_ so that they
// include the new value.
void ExpandBoundsToInclude(RpoNumber block);
void SetSpillRequired(InstructionBlock* block, int vreg,
RpoNumber top_start_block);
void SetDefinition(RpoNumber block, int vreg);
// The first backward pass is responsible for marking blocks which do not
// themselves need the value to be on the stack, but which do have successors
// requiring the value to be on the stack.
void FirstBackwardPass();
// The forward pass is responsible for selecting merge points that should
// require the value to be on the stack.
void ForwardPass();
// The second backward pass is responsible for propagating the spill
// requirements to the earliest block where all successors can agree a spill
// is required. It also emits the actual spill instructions.
void SecondBackwardPass();
void CommitSpill(int vreg, InstructionBlock* predecessor,
InstructionBlock* successor);
// Each Entry represents the state for 64 values at a block, so that we can
// compute a batch of values in parallel.
class Entry;
static constexpr int kValueIndicesPerEntry = 64;
// Objects provided to the constructor, which all outlive this SpillPlacer.
LiveRangeFinder* finder_;
TopTierRegisterAllocationData* data_;
Zone* zone_;
// An array of one Entry per block, where blocks are in reverse post-order.
Entry* entries_ = nullptr;
// An array representing which TopLevelLiveRange is in each bit.
int* vreg_numbers_ = nullptr;
// The number of vreg_numbers_ that have been assigned.
int assigned_indices_ = 0;
// The first and last block that have any definitions or uses in the current
// batch of values. In large functions, tracking these bounds can help prevent
// additional work.
RpoNumber first_block_ = RpoNumber::Invalid();
RpoNumber last_block_ = RpoNumber::Invalid();
DISALLOW_COPY_AND_ASSIGN(SpillPlacer);
};
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_BACKEND_SPILL_PLACER_H_
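
To make the steps in the header comment above concrete, here is a small worked example, derived by tracing the passes (not taken from the patch). Consider a non-deferred diamond B0 -> {B1, B2} -> B3 in reverse post-order, where the value is defined in B0 and only B2 contains a use requiring it on the stack. Step 4 marks B2 as "spill required" and step 3 marks B0 as the definition. The first backward pass adds nothing: B0 already carries the definition, and B1 and B3 have no successors that need the value. The forward pass leaves the merge block B3 unmarked, because only one of its predecessors (B2) requires a spill and B3 has no successor needing the value. In the second backward pass, B0 sees that only some of its successors require a spill, so spilling at the definition is rejected; instead a spill move is inserted at the start of B2 and the range is marked as late-spilling. The path B0 -> B1 -> B3 therefore executes no spill, as rules 2 and 3 require.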

src/compiler/pipeline.cc

@@ -2245,16 +2245,6 @@ struct MergeSplintersPhase {
}
};
struct LocateSpillSlotsPhase {
DECL_PIPELINE_PHASE_CONSTANTS(LocateSpillSlots)
void Run(PipelineData* data, Zone* temp_zone) {
SpillSlotLocator locator(data->top_tier_register_allocation_data());
locator.LocateSpillSlots();
}
};
struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
@@ -3701,15 +3691,16 @@ void PipelineImpl::AllocateRegistersForTopTier(
verifier->VerifyAssignment("Immediately after CommitAssignmentPhase.");
}
Run<PopulateReferenceMapsPhase>();
Run<ConnectRangesPhase>();
Run<ResolveControlFlowPhase>();
Run<PopulateReferenceMapsPhase>();
if (FLAG_turbo_move_optimization) {
Run<OptimizeMovesPhase>();
}
Run<LocateSpillSlotsPhase>();
TraceSequence(info(), data, "after register allocation");

src/flags/flag-definitions.h

@@ -546,6 +546,9 @@ DEFINE_BOOL(turbo_sp_frame_access, false,
"use stack pointer-relative access to frame wherever possible")
DEFINE_BOOL(turbo_control_flow_aware_allocation, true,
"consider control flow while allocating registers")
DEFINE_BOOL(
stress_turbo_late_spilling, false,
"optimize placement of all spill instructions, not just loop-top phis")
DEFINE_STRING(turbo_filter, "*", "optimization filter for TurboFan compiler")
DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
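
Usage note (not part of the patch): like any other DEFINE_BOOL flag, this should be controllable from the d8 command line in the usual way, e.g.

d8 --stress-turbo-late-spilling test.js

to force optimal spill placement for all values instead of only loop-top phis.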