[turbofan] in-block scheduling: schedule non-effect-chain nodes late

By giving higher priority to processing non-effect-chain nodes, this
changes the scheduling inside of basic blocks to place nodes before the
latest possible effect chain node.

Change-Id: I8b130904a1bb2360b995eb9de4f471a911a4e388
Reviewed-on: https://chromium-review.googlesource.com/c/1337743
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57656}
This commit is contained in:
Tobias Tebbi 2018-11-20 16:25:25 +01:00 committed by Commit Bot
parent 280776f85c
commit 41ad531f4f
2 changed files with 63 additions and 19 deletions

View File

@ -25,6 +25,41 @@ namespace compiler {
if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
} while (false)
// This is a simple priority queue for nodes, with priority classes being
// assigned by the heuristics in SchedulableNodesQueue::PriorityClass. The queue
// is used in the schedule late phase, for nodes where all uses have been
// scheduled already. The priorities only affect the ordering of nodes within a
// basic block, with nodes with higher priority (i.e., smaller
// {PriorityClass()}) being scheduled towards the end of the block (because
// basic blocks are filled backwards).
class SchedulableNodesQueue : public ZoneObject {
 public:
  explicit SchedulableNodesQueue(Zone* zone)
      : queues_{ZoneQueue<Node*>(zone), ZoneQueue<Node*>(zone)} {}

  // Insert {node} into the bucket selected by its priority class.
  void push(Node* node) { queues_[PriorityClass(node)].push(node); }

  // Peek at / remove the next node, always taken from the highest-priority
  // (lowest-index) non-empty bucket. Must not be called when empty().
  Node* front() { return ActiveQueue().front(); }
  void pop() { ActiveQueue().pop(); }

  // The queue is empty iff every priority bucket is empty.
  bool empty() const {
    for (size_t i = 0; i < queues_.size(); ++i) {
      if (!queues_[i].empty()) return false;
    }
    return true;
  }

 private:
  // Returns the first non-empty bucket; callers guarantee !empty().
  ZoneQueue<Node*>& ActiveQueue() {
    for (size_t i = 0; i < queues_.size(); ++i) {
      if (!queues_[i].empty()) return queues_[i];
    }
    UNREACHABLE();
  }

  // Maps a node to its priority class in [0, kPriorityClasses); defined
  // alongside the schedule-late phase that this heuristic belongs to.
  int PriorityClass(Node* node);

  static constexpr int kPriorityClasses = 2;
  std::array<ZoneQueue<Node*>, kPriorityClasses> queues_;
};
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
size_t node_count_hint)
: zone_(zone),
@ -33,7 +68,7 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
flags_(flags),
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
schedule_queue_(zone),
schedule_queue_(new (zone) SchedulableNodesQueue(zone)),
node_data_(zone) {
node_data_.reserve(node_count_hint);
node_data_.resize(graph->NodeCount(), DefaultSchedulerData());
@ -225,7 +260,7 @@ void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
}
if (GetData(node)->unscheduled_count_ == 0) {
TRACE(" newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
schedule_queue_.push(node);
schedule_queue_->push(node);
}
}
@ -1352,6 +1387,13 @@ void Scheduler::ScheduleEarly() {
// -----------------------------------------------------------------------------
// Phase 5: Schedule nodes late.
// Nodes that produce an effect output get the lower priority class (1),
// all other nodes the higher one (0). Since basic blocks are filled
// backwards, this places value nodes before the latest possible effect
// chain node.
int SchedulableNodesQueue::PriorityClass(Node* node) {
  return node->op()->EffectOutputCount() > 0 ? 1 : 0;
}
class ScheduleLateNodeVisitor {
public:
@ -1365,13 +1407,20 @@ class ScheduleLateNodeVisitor {
// Run the schedule late algorithm on a set of fixed root nodes.
void Run(NodeVector* roots) {
for (Node* const root : *roots) {
ProcessQueue(root);
EnqueueRootInputs(root);
}
// Fixed point to drain the queue of schedulable nodes.
SchedulableNodesQueue* queue = scheduler_->schedule_queue_;
do {
Node* const node = queue->front();
queue->pop();
VisitNode(node);
} while (!queue->empty());
}
private:
void ProcessQueue(Node* root) {
ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
void EnqueueRootInputs(Node* root) {
SchedulableNodesQueue* queue = scheduler_->schedule_queue_;
for (Node* node : root->inputs()) {
// Don't schedule coupled nodes on their own.
if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
@ -1382,11 +1431,6 @@ class ScheduleLateNodeVisitor {
if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;
queue->push(node);
do {
Node* const node = queue->front();
queue->pop();
VisitNode(node);
} while (!queue->empty());
}
}
@ -1540,7 +1584,7 @@ class ScheduleLateNodeVisitor {
use_node = CloneNode(node);
TRACE(" cloning #%d:%s for id:%d\n", use_node->id(),
use_node->op()->mnemonic(), use_block->id().ToInt());
scheduler_->schedule_queue_.push(use_node);
scheduler_->schedule_queue_->push(use_node);
}
}
edge.UpdateTo(use_node);

View File

@ -21,9 +21,9 @@ namespace compiler {
class CFGBuilder;
class ControlEquivalence;
class Graph;
class SchedulableNodesQueue;
class SpecialRPONumberer;
// Computes a schedule from a graph, placing nodes into basic blocks and
// ordering the basic blocks in the special RPO order.
class V8_EXPORT_PRIVATE Scheduler {
@ -71,13 +71,13 @@ class V8_EXPORT_PRIVATE Scheduler {
Schedule* schedule_;
Flags flags_;
ZoneVector<NodeVector*>
scheduled_nodes_; // Per-block list of nodes in reverse.
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
ZoneQueue<Node*> schedule_queue_; // Worklist of schedulable nodes.
ZoneVector<SchedulerData> node_data_; // Per-node data for all nodes.
CFGBuilder* control_flow_builder_; // Builds basic blocks for controls.
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
scheduled_nodes_; // Per-block list of nodes in reverse.
NodeVector schedule_root_nodes_; // Fixed root nodes seed the worklist.
SchedulableNodesQueue* schedule_queue_; // Worklist of schedulable nodes.
ZoneVector<SchedulerData> node_data_; // Per-node data for all nodes.
CFGBuilder* control_flow_builder_; // Builds basic blocks for controls.
SpecialRPONumberer* special_rpo_; // Special RPO numbering of blocks.
ControlEquivalence* equivalence_; // Control dependence equivalence.
Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags,
size_t node_count_hint_);