Improve comments and readability of scheduler.

R=jarin@chromium.org

Review URL: https://codereview.chromium.org/642803003

git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@24526 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
mstarzinger@chromium.org 2014-10-10 11:57:55 +00:00
parent 6490b9a656
commit 2d29390448
2 changed files with 183 additions and 165 deletions

src/compiler/scheduler.cc

@@ -28,6 +28,102 @@ static inline void Trace(const char* msg, ...) {
}
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
: zone_(zone),
graph_(graph),
schedule_(schedule),
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
has_floating_control_(false) {}
Schedule* Scheduler::ComputeSchedule(Graph* graph) {
Schedule* schedule;
bool had_floating_control = false;
do {
Zone tmp_zone(graph->zone()->isolate());
schedule = new (graph->zone())
Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
Scheduler scheduler(&tmp_zone, graph, schedule);
scheduler.BuildCFG();
Scheduler::ComputeSpecialRPO(schedule);
scheduler.GenerateImmediateDominatorTree();
scheduler.PrepareUses();
scheduler.ScheduleEarly();
scheduler.ScheduleLate();
had_floating_control = scheduler.ConnectFloatingControl();
} while (had_floating_control);
return schedule;
}
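
For orientation, here is a hedged, self-contained model of the retry loop above. None of the names below (FakeGraph, RunSchedulingPass) are V8 API; the point is only that ConnectFloatingControl mutates the graph, so the whole pipeline is re-run from scratch until no floating control remains.

// Hedged, self-contained model of the retry loop (illustrative names only).
#include <cstdio>

struct FakeGraph {
  int floating_control_nodes = 2;  // pretend two floating branches exist
};

// Stand-in for one complete scheduling pass; returns true if it had to
// connect floating control, in which case the caller must schedule again.
static bool RunSchedulingPass(FakeGraph* graph) {
  // BuildCFG, ComputeSpecialRPO, GenerateImmediateDominatorTree, PrepareUses,
  // ScheduleEarly and ScheduleLate would run here in the real scheduler.
  if (graph->floating_control_nodes == 0) return false;
  --graph->floating_control_nodes;  // "connect" one floating subgraph
  return true;
}

int main() {
  FakeGraph graph;
  bool had_floating_control = false;
  int passes = 0;
  do {
    had_floating_control = RunSchedulingPass(&graph);
    ++passes;
  } while (had_floating_control);
  std::printf("converged after %d pass(es)\n", passes);  // prints 3
  return 0;
}
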
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
SchedulerData def = {0, -1, false, false, kUnknown};
return def;
}
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
SchedulerData* data = GetData(node);
if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
switch (node->opcode()) {
case IrOpcode::kParameter:
// Parameters are always fixed to the start node.
data->placement_ = kFixed;
break;
case IrOpcode::kPhi:
case IrOpcode::kEffectPhi: {
// Phis and effect phis are fixed if their control inputs are.
data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
break;
}
#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
#undef DEFINE_FLOATING_CONTROL_CASE
{
// Control nodes that were not control-reachable from end may float.
data->placement_ = kSchedulable;
if (!data->is_connected_control_) {
data->is_floating_control_ = true;
has_floating_control_ = true;
Trace("Floating control found: #%d:%s\n", node->id(),
node->op()->mnemonic());
}
break;
}
default:
data->placement_ = kSchedulable;
break;
}
}
return data->placement_;
}
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
while (b1 != b2) {
int b1_rpo = GetRPONumber(b1);
int b2_rpo = GetRPONumber(b2);
DCHECK(b1_rpo != b2_rpo);
if (b1_rpo < b2_rpo) {
b2 = b2->dominator();
} else {
b1 = b1->dominator();
}
}
return b1;
}
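
The loop above relies on the invariant that a block's immediate dominator always has a smaller RPO number, so whichever block currently has the larger number is moved up its dominator chain until the two meet. A hedged, standalone illustration on a toy diamond CFG (ToyBlock and CommonDominator are made up for this example):

// Hedged, standalone version of the same walk on a toy dominator tree (types
// and data are made up for this example).
#include <cassert>

struct ToyBlock {
  int rpo_number;
  ToyBlock* dominator;  // immediate dominator; always has a smaller rpo_number
};

static ToyBlock* CommonDominator(ToyBlock* b1, ToyBlock* b2) {
  while (b1 != b2) {
    if (b1->rpo_number < b2->rpo_number) {
      b2 = b2->dominator;  // b2 is deeper in RPO, move it up one level
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}

int main() {
  // Diamond CFG: entry dominates the then-, else- and merge-blocks.
  ToyBlock entry{0, nullptr};
  ToyBlock then_block{1, &entry};
  ToyBlock else_block{2, &entry};
  ToyBlock merge{3, &entry};
  assert(CommonDominator(&then_block, &else_block) == &entry);
  assert(CommonDominator(&merge, &then_block) == &entry);
  assert(CommonDominator(&merge, &merge) == &merge);
  return 0;
}
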
// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph and dominator tree.
// Internal class to build a control flow graph (i.e. the basic blocks and edges
// between them within a Schedule) from the node graph.
// Visits the control edges of the graph backwards from end in order to find
@@ -218,84 +314,6 @@ class CFGBuilder {
};
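
As the comment before CFGBuilder says, the CFG is discovered by walking control edges backwards from End. A hedged sketch of that idea on toy types (not the real CFGBuilder): only control nodes reachable backwards from End become part of the connected control; everything else stays floating.

// Hedged sketch of the backward control-edge walk, on toy types (none of
// these are the real V8 classes).
#include <queue>
#include <set>
#include <vector>

struct ToyNode {
  int id;
  std::vector<ToyNode*> control_inputs;
};

// Breadth-first walk backwards from End over control inputs; the returned set
// contains the ids of all control nodes that are control-reachable from End.
static std::set<int> CollectConnectedControl(ToyNode* end) {
  std::set<int> reached;
  std::queue<ToyNode*> worklist;
  reached.insert(end->id);
  worklist.push(end);
  while (!worklist.empty()) {
    ToyNode* node = worklist.front();
    worklist.pop();
    for (ToyNode* input : node->control_inputs) {
      if (reached.insert(input->id).second) worklist.push(input);
    }
  }
  return reached;
}

int main() {
  ToyNode start{0, {}};
  ToyNode branch{1, {&start}};
  ToyNode if_true{2, {&branch}};
  ToyNode if_false{3, {&branch}};
  ToyNode merge{4, {&if_true, &if_false}};
  ToyNode end{5, {&merge}};
  ToyNode floating{6, {&start}};  // control node that never reaches End
  std::set<int> connected = CollectConnectedControl(&end);
  // `floating` is not in the set; the real scheduler would flag such nodes via
  // is_connected_control_ / is_floating_control_ and fix them up later in
  // ConnectFloatingControl.
  return connected.count(floating.id) ? 1 : 0;
}
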
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
SchedulerData def = {0, -1, false, false, kUnknown};
return def;
}
Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
: zone_(zone),
graph_(graph),
schedule_(schedule),
scheduled_nodes_(zone),
schedule_root_nodes_(zone),
node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
has_floating_control_(false) {}
Schedule* Scheduler::ComputeSchedule(Graph* graph) {
Schedule* schedule;
bool had_floating_control = false;
do {
Zone tmp_zone(graph->zone()->isolate());
schedule = new (graph->zone())
Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
Scheduler scheduler(&tmp_zone, graph, schedule);
scheduler.BuildCFG();
Scheduler::ComputeSpecialRPO(schedule);
scheduler.GenerateImmediateDominatorTree();
scheduler.PrepareUses();
scheduler.ScheduleEarly();
scheduler.ScheduleLate();
had_floating_control = scheduler.ConnectFloatingControl();
} while (had_floating_control);
return schedule;
}
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
SchedulerData* data = GetData(node);
if (data->placement_ == kUnknown) { // Compute placement, once, on demand.
switch (node->opcode()) {
case IrOpcode::kParameter:
// Parameters are always fixed to the start node.
data->placement_ = kFixed;
break;
case IrOpcode::kPhi:
case IrOpcode::kEffectPhi: {
// Phis and effect phis are fixed if their control inputs are.
data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
break;
}
#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
#undef DEFINE_FLOATING_CONTROL_CASE
{
// Control nodes that were not control-reachable from end may float.
data->placement_ = kSchedulable;
if (!data->is_connected_control_) {
data->is_floating_control_ = true;
has_floating_control_ = true;
Trace("Floating control found: #%d:%s\n", node->id(),
node->op()->mnemonic());
}
break;
}
default:
data->placement_ = kSchedulable;
break;
}
}
return data->placement_;
}
void Scheduler::BuildCFG() {
Trace("---------------- CREATING CFG ------------------\n");
CFGBuilder cfg_builder(zone_, this);
@@ -305,21 +323,6 @@ void Scheduler::BuildCFG() {
}
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
while (b1 != b2) {
int b1_rpo = GetRPONumber(b1);
int b2_rpo = GetRPONumber(b2);
DCHECK(b1_rpo != b2_rpo);
if (b1_rpo < b2_rpo) {
b2 = b2->dominator();
} else {
b1 = b1->dominator();
}
}
return b1;
}
void Scheduler::GenerateImmediateDominatorTree() {
// Build the dominator graph. TODO(danno): consider using Lengauer & Tarjan's
// if this becomes really slow.
@@ -352,6 +355,69 @@ void Scheduler::GenerateImmediateDominatorTree() {
}
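
The body of GenerateImmediateDominatorTree is elided by this hunk; as a hedged, generic sketch of the simple construction the TODO contrasts with Lengauer & Tarjan, each block's immediate dominator can be taken as the common dominator of its already-processed predecessors when blocks are visited in RPO.

// Hedged, generic sketch (toy types, reducible CFGs assumed; not the elided
// V8 implementation).
#include <cstddef>
#include <vector>

struct Blk {
  int rpo_number = 0;
  Blk* dominator = nullptr;
  std::vector<Blk*> predecessors;
};

// Walk two dominator chains towards the entry until they meet; an immediate
// dominator always has a smaller RPO number than the block it dominates.
static Blk* Intersect(Blk* b1, Blk* b2) {
  while (b1 != b2) {
    if (b1->rpo_number < b2->rpo_number) {
      b2 = b2->dominator;
    } else {
      b1 = b1->dominator;
    }
  }
  return b1;
}

// Visit blocks in RPO (entry first); each block's immediate dominator is the
// common dominator of its already-processed predecessors. Predecessors that
// have no dominator yet (loop back edges) are skipped.
static void BuildImmediateDominators(const std::vector<Blk*>& rpo_order) {
  rpo_order[0]->dominator = rpo_order[0];  // the entry block dominates itself
  for (std::size_t i = 1; i < rpo_order.size(); ++i) {
    Blk* idom = nullptr;
    for (Blk* pred : rpo_order[i]->predecessors) {
      if (pred->dominator == nullptr) continue;
      idom = (idom == nullptr) ? pred : Intersect(idom, pred);
    }
    rpo_order[i]->dominator = idom;
  }
}

int main() {
  // Diamond CFG: entry -> {then, else} -> merge.
  Blk entry, then_block, else_block, merge;
  entry.rpo_number = 0;
  then_block.rpo_number = 1; then_block.predecessors = {&entry};
  else_block.rpo_number = 2; else_block.predecessors = {&entry};
  merge.rpo_number = 3;      merge.predecessors = {&then_block, &else_block};
  std::vector<Blk*> rpo = {&entry, &then_block, &else_block, &merge};
  BuildImmediateDominators(rpo);
  return merge.dominator == &entry ? 0 : 1;  // merge's idom is the entry block
}
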
// -----------------------------------------------------------------------------
// Phase 2: Prepare use counts for nodes.
class PrepareUsesVisitor : public NullNodeVisitor {
public:
explicit PrepareUsesVisitor(Scheduler* scheduler)
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
GenericGraphVisit::Control Pre(Node* node) {
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
// Fixed nodes are always roots for schedule late.
scheduler_->schedule_root_nodes_.push_back(node);
if (!schedule_->IsScheduled(node)) {
// Make sure root nodes are scheduled in their respective blocks.
Trace(" Scheduling fixed position node #%d:%s\n", node->id(),
node->op()->mnemonic());
IrOpcode::Value opcode = node->opcode();
BasicBlock* block =
opcode == IrOpcode::kParameter
? schedule_->start()
: schedule_->block(NodeProperties::GetControlInput(node));
DCHECK(block != NULL);
schedule_->AddNode(block, node);
}
}
return GenericGraphVisit::CONTINUE;
}
void PostEdge(Node* from, int index, Node* to) {
// If the edge is from an unscheduled node, then tally it in the use count
// for all of its inputs. The same criterion will be used in ScheduleLate
// for decrementing use counts.
if (!schedule_->IsScheduled(from)) {
DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
++(scheduler_->GetData(to)->unscheduled_count_);
Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
scheduler_->GetData(to)->unscheduled_count_);
}
}
private:
Scheduler* scheduler_;
Schedule* schedule_;
};
void Scheduler::PrepareUses() {
Trace("------------------- PREPARE USES ------------------\n");
// Count the uses of every node; these counts are used to ensure that all of
// a node's uses are scheduled before the node itself.
PrepareUsesVisitor prepare_uses(this);
graph_->VisitNodeInputsFromEnd(&prepare_uses);
}
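
A hedged model of the use-count handshake described in PostEdge above (toy types, not the real visitors): PrepareUses bumps unscheduled_count_ for every input used by a not-yet-scheduled node, and ScheduleLate later decrements the same counts, releasing an input for placement once all of its uses have been placed.

// Hedged model of the use-count handshake (toy types, not the real visitors).
#include <cstdio>
#include <vector>

struct UseNode {
  int id;
  int unscheduled_count;
  bool scheduled;
  std::vector<UseNode*> inputs;
};

// PrepareUses side: every edge from a node that is not yet scheduled adds one
// pending use to each of its inputs.
static void CountUses(UseNode* node) {
  if (node->scheduled) return;
  for (UseNode* input : node->inputs) ++input->unscheduled_count;
}

// ScheduleLate side: once `node` has been placed, its inputs lose one pending
// use each; an input whose count drops to zero is now free to be placed too.
static void ReleaseUses(UseNode* node, std::vector<UseNode*>* ready) {
  node->scheduled = true;
  for (UseNode* input : node->inputs) {
    if (--input->unscheduled_count == 0) ready->push_back(input);
  }
}

int main() {
  UseNode a{1, 0, false, {}};
  UseNode b{2, 0, false, {}};
  UseNode user{3, 0, false, {&a, &b}};
  CountUses(&user);               // a and b each gain one unscheduled use
  std::vector<UseNode*> ready;
  ReleaseUses(&user, &ready);     // placing `user` releases a and b
  std::printf("%zu inputs became ready\n", ready.size());  // prints 2
  return 0;
}
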
// -----------------------------------------------------------------------------
// Phase 3: Schedule nodes early.
class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
public:
explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
@@ -429,59 +495,8 @@ void Scheduler::ScheduleEarly() {
}
class PrepareUsesVisitor : public NullNodeVisitor {
public:
explicit PrepareUsesVisitor(Scheduler* scheduler)
: scheduler_(scheduler), schedule_(scheduler->schedule_) {}
GenericGraphVisit::Control Pre(Node* node) {
if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
// Fixed nodes are always roots for schedule late.
scheduler_->schedule_root_nodes_.push_back(node);
if (!schedule_->IsScheduled(node)) {
// Make sure root nodes are scheduled in their respective blocks.
Trace(" Scheduling fixed position node #%d:%s\n", node->id(),
node->op()->mnemonic());
IrOpcode::Value opcode = node->opcode();
BasicBlock* block =
opcode == IrOpcode::kParameter
? schedule_->start()
: schedule_->block(NodeProperties::GetControlInput(node));
DCHECK(block != NULL);
schedule_->AddNode(block, node);
}
}
return GenericGraphVisit::CONTINUE;
}
void PostEdge(Node* from, int index, Node* to) {
// If the edge is from an unscheduled node, then tally it in the use count
// for all of its inputs. The same criterion will be used in ScheduleLate
// for decrementing use counts.
if (!schedule_->IsScheduled(from)) {
DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
++(scheduler_->GetData(to)->unscheduled_count_);
Trace(" Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
scheduler_->GetData(to)->unscheduled_count_);
}
}
private:
Scheduler* scheduler_;
Schedule* schedule_;
};
void Scheduler::PrepareUses() {
Trace("------------------- PREPARE USES ------------------\n");
// Count the uses of every node; these counts are used to ensure that all of
// a node's uses are scheduled before the node itself.
PrepareUsesVisitor prepare_uses(this);
graph_->VisitNodeInputsFromEnd(&prepare_uses);
}
// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes late.
class ScheduleLateNodeVisitor : public NullNodeVisitor {
@@ -637,6 +652,9 @@ void Scheduler::ScheduleLate() {
}
// -----------------------------------------------------------------------------
bool Scheduler::ConnectFloatingControl() {
if (!has_floating_control_) return false;
@@ -1137,6 +1155,7 @@ BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
#endif
return final_order;
}
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8

src/compiler/scheduler.h

@@ -19,17 +19,13 @@ namespace compiler {
// ordering the basic blocks in the special RPO order.
class Scheduler {
public:
// The complete scheduling algorithm.
// Create a new schedule and place all nodes from the graph into it.
// The complete scheduling algorithm. Creates a new schedule and places all
// nodes from the graph into it.
static Schedule* ComputeSchedule(Graph* graph);
// Compute the RPO of blocks in an existing schedule.
static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule);
// (Exposed for testing only)
// Build and connect the CFG for a node graph, but don't schedule nodes.
static void ComputeCFG(Graph* graph, Schedule* schedule);
private:
enum Placement { kUnknown, kSchedulable, kFixed };
@@ -61,8 +57,6 @@ class Scheduler {
return &node_data_[node->id()];
}
void BuildCFG();
Placement GetPlacement(Node* node);
int GetRPONumber(BasicBlock* block) {
@@ -73,26 +67,31 @@ class Scheduler {
return block->rpo_number();
}
void GenerateImmediateDominatorTree();
BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
// Phase 1: Build control-flow graph and dominator tree.
friend class CFGBuilder;
void BuildCFG();
void GenerateImmediateDominatorTree();
friend class ScheduleEarlyNodeVisitor;
void ScheduleEarly();
// Phase 2: Prepare use counts for nodes.
friend class PrepareUsesVisitor;
void PrepareUses();
// Phase 3: Schedule nodes early.
friend class ScheduleEarlyNodeVisitor;
void ScheduleEarly();
// Phase 4: Schedule nodes late.
friend class ScheduleLateNodeVisitor;
void ScheduleLate();
bool ConnectFloatingControl();
void ConnectFloatingControlSubgraph(BasicBlock* block, Node* node);
};
}
}
} // namespace v8::internal::compiler
} // namespace compiler
} // namespace internal
} // namespace v8
#endif // V8_COMPILER_SCHEDULER_H_
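
For context, a hedged usage sketch of the public interface declared above; it is not part of this CL, and the include paths plus the BasicBlockVector::size() call are assumptions about the v8 tree of the time.

// Hedged usage sketch, not part of this CL; include paths and the
// BasicBlockVector::size() call are assumptions, not verified V8 API.
#include <cstddef>

#include "src/compiler/graph.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"

namespace v8 {
namespace internal {
namespace compiler {

// Illustrative helper (not part of V8): schedule a finished graph and report
// how many basic blocks its special RPO contains.
size_t ScheduleAndCountBlocks(Graph* graph) {
  // One call runs every phase: CFG construction, special RPO, dominator tree,
  // use preparation, early and late scheduling, and floating-control fixup.
  Schedule* schedule = Scheduler::ComputeSchedule(graph);
  BasicBlockVector* rpo = Scheduler::ComputeSpecialRPO(schedule);
  return rpo->size();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8
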