// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <deque>
#include <queue>

#include "src/compiler/scheduler.h"

#include "src/bit-vector.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
#include "src/compiler/graph-inl.h"
#include "src/compiler/node.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node-properties-inl.h"

namespace v8 {
namespace internal {
namespace compiler {

static inline void Trace(const char* msg, ...) {
  if (FLAG_trace_turbo_scheduler) {
    va_list arguments;
    va_start(arguments, msg);
    base::OS::VPrint(msg, arguments);
    va_end(arguments);
  }
}


Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}


Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
  Schedule* schedule = new (graph->zone())
      Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
  Scheduler scheduler(zone, graph, schedule);

  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateImmediateDominatorTree();

  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  scheduler.SealFinalSchedule();

  return schedule;
}

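// Initial per-node data: the minimum block defaults to the schedule's start
// block, no unscheduled uses have been counted yet, and the placement is
// still unknown (GetPlacement computes it lazily).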
Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
  SchedulerData def = {schedule_->start(), 0, kUnknown};
  return def;
}


Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
  DCHECK(node->id() < static_cast<int>(node_data_.size()));
  return &node_data_[node->id()];
}

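// Lazily computes the placement of a node: kFixed nodes are pinned to a basic
// block by the CFG (e.g. parameters and fixed control nodes), kCoupled phis
// float together with their (still floating) control input, and kSchedulable
// nodes can be placed anywhere between their schedule-early and schedule-late
// positions. Nodes become kScheduled once they are assigned to a block.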
Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
        // Parameters are always fixed to the start node.
        data->placement_ = kFixed;
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are fixed if their control inputs are, and are
        // otherwise coupled to a floating control node.
        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
        data->placement_ = (p == kFixed ? kFixed : kCoupled);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes that were not control-reachable from end may float.
        data->placement_ = kSchedulable;
        break;
      }
      default:
        data->placement_ = kSchedulable;
        break;
    }
  }
  return data->placement_;
}


void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
        // Parameters are fixed once and for all.
        UNREACHABLE();
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are coupled to their respective blocks.
        DCHECK_EQ(Scheduler::kCoupled, data->placement_);
        DCHECK_EQ(Scheduler::kFixed, placement);
        Node* control = NodeProperties::GetControlInput(node);
        BasicBlock* block = schedule_->block(control);
        schedule_->AddNode(block, node);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes force coupled uses to be placed.
        Node::Uses uses = node->uses();
        for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
          if (GetPlacement(*i) == Scheduler::kCoupled) {
            DCHECK_EQ(node, NodeProperties::GetControlInput(*i));
            UpdatePlacement(*i, placement);
          }
        }
        break;
      }
      default:
        DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
        DCHECK_EQ(Scheduler::kScheduled, placement);
        break;
    }
    // Reduce the use count of the node's inputs to potentially make them
    // schedulable. If all the uses of a node have been scheduled, then the node
    // itself can be scheduled.
    for (Edge const edge : node->input_edges()) {
      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
    }
  }
  data->placement_ = placement;
}

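// The control input of a coupled (floating) phi is not counted as a use for
// scheduling purposes; the use-count helpers below filter such edges out.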
bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
  return GetPlacement(node) == kCoupled &&
         NodeProperties::FirstControlIndex(node) == index;
}


void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return IncrementUnscheduledUseCount(control, index, from);
  }

  ++(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}


void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return DecrementUnscheduledUseCount(control, index, from);
  }

  DCHECK(GetData(node)->unscheduled_count_ > 0);
  --(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    Trace("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    Trace("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}

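// Finds the nearest common dominator of {b1} and {b2} by repeatedly walking
// the deeper of the two blocks up its dominator chain until the chains meet.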
BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
  while (b1 != b2) {
    int32_t b1_depth = b1->dominator_depth();
    int32_t b2_depth = b2->dominator_depth();
    if (b1_depth < b2_depth) {
      b2 = b2->dominator();
    } else {
      b1 = b1->dominator();
    }
  }
  return b1;
}


// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.

// Internal class to build a control flow graph (i.e. the basic blocks and edges
// between them within a Schedule) from the node graph. Visits control edges of
// the graph backwards from an end node in order to find the connected control
// subgraph, needed for scheduling.
class CFGBuilder : public ZoneObject {
 public:
  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queued_(scheduler->graph_, 2),
        queue_(zone),
        control_(zone),
        component_entry_(NULL),
        component_start_(NULL),
        component_end_(NULL) {}

  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    ResetDataStructures();
    Queue(scheduler_->graph_->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }

  // Run the control flow graph construction for a minimal control-connected
  // component ending in {exit} and merge that component into an existing
  // control flow graph at the bottom of {block}.
  void Run(BasicBlock* block, Node* exit) {
    ResetDataStructures();
    Queue(exit);

    component_entry_ = NULL;
    component_start_ = block;
    component_end_ = schedule_->block(exit);
    scheduler_->equivalence_->Run(exit);
    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();

      // Use control dependence equivalence to find a canonical single-entry
      // single-exit region that makes up a minimal component to be scheduled.
      if (IsSingleEntrySingleExitRegion(node, exit)) {
        Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
        DCHECK_EQ(NULL, component_entry_);
        component_entry_ = node;
        continue;
      }

      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }
    DCHECK_NE(NULL, component_entry_);

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }

 private:
  // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
  friend class Scheduler;

  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
  }

  void Queue(Node* node) {
    // Mark the connected control nodes as they are queued.
    if (!queued_.Get(node)) {
      BuildBlocks(node);
      queue_.push(node);
      queued_.Set(node, true);
      control_.push_back(node);
    }
  }

  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kEnd:
        FixNode(schedule_->end(), node);
        break;
      case IrOpcode::kStart:
        FixNode(schedule_->start(), node);
        break;
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kTerminate: {
        // Put Terminate in the loop to which it refers.
        Node* loop = NodeProperties::GetControlInput(node);
        BasicBlock* block = BuildBlockForNode(loop);
        FixNode(block, node);
        break;
      }
      case IrOpcode::kBranch:
        BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
        break;
      default:
        break;
    }
  }

  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectBranch(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectReturn(node);
        break;
      default:
        break;
    }
  }

  BasicBlock* BuildBlockForNode(Node* node) {
    BasicBlock* block = schedule_->block(node);
    if (block == NULL) {
      block = schedule_->NewBasicBlock();
      Trace("Create block B%d for #%d:%s\n", block->id().ToInt(), node->id(),
            node->op()->mnemonic());
      FixNode(block, node);
    }
    return block;
  }

  void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
                                IrOpcode::Value b) {
    Node* successors[2];
    CollectSuccessorProjections(node, successors, a, b);
    BuildBlockForNode(successors[0]);
    BuildBlockForNode(successors[1]);
  }

  // Collect the branch-related projections from a node, such as IfTrue,
  // IfFalse.
  // TODO(titzer): consider moving this to node.h
  void CollectSuccessorProjections(Node* node, Node** buffer,
                                   IrOpcode::Value true_opcode,
                                   IrOpcode::Value false_opcode) {
    buffer[0] = NULL;
    buffer[1] = NULL;
    for (Node* use : node->uses()) {
      if (use->opcode() == true_opcode) {
        DCHECK_EQ(NULL, buffer[0]);
        buffer[0] = use;
      }
      if (use->opcode() == false_opcode) {
        DCHECK_EQ(NULL, buffer[1]);
        buffer[1] = use;
      }
    }
    DCHECK_NE(NULL, buffer[0]);
    DCHECK_NE(NULL, buffer[1]);
  }

  void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
                              IrOpcode::Value true_opcode,
                              IrOpcode::Value false_opcode) {
    Node* successors[2];
    CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
    buffer[0] = schedule_->block(successors[0]);
    buffer[1] = schedule_->block(successors[1]);
  }

  void ConnectBranch(Node* branch) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
                           IrOpcode::kIfFalse);

    // Consider branch hints.
    switch (BranchHintOf(branch->op())) {
      case BranchHint::kNone:
        break;
      case BranchHint::kTrue:
        successor_blocks[1]->set_deferred(true);
        break;
      case BranchHint::kFalse:
        successor_blocks[0]->set_deferred(true);
        break;
    }

    if (branch == component_entry_) {
      TraceConnect(branch, component_start_, successor_blocks[0]);
      TraceConnect(branch, component_start_, successor_blocks[1]);
      schedule_->InsertBranch(component_start_, component_end_, branch,
                              successor_blocks[0], successor_blocks[1]);
    } else {
      Node* branch_block_node = NodeProperties::GetControlInput(branch);
      BasicBlock* branch_block = schedule_->block(branch_block_node);
      DCHECK(branch_block != NULL);

      TraceConnect(branch, branch_block, successor_blocks[0]);
      TraceConnect(branch, branch_block, successor_blocks[1]);
      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                           successor_blocks[1]);
    }
  }

  void ConnectMerge(Node* merge) {
    // Don't connect the special merge at the end to its predecessors.
    if (IsFinalMerge(merge)) return;

    BasicBlock* block = schedule_->block(merge);
    DCHECK(block != NULL);
    // For all of the merge's control inputs, add a goto at the end to the
    // merge's basic block.
    for (Node* const input : merge->inputs()) {
      BasicBlock* predecessor_block = schedule_->block(input);
      TraceConnect(merge, predecessor_block, block);
      schedule_->AddGoto(predecessor_block, block);
    }
  }

  void ConnectReturn(Node* ret) {
    Node* return_block_node = NodeProperties::GetControlInput(ret);
    BasicBlock* return_block = schedule_->block(return_block_node);
    TraceConnect(ret, return_block, NULL);
    schedule_->AddReturn(return_block, ret);
  }

  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
    DCHECK_NE(NULL, block);
    if (succ == NULL) {
      Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
            block->id().ToInt());
    } else {
      Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
            block->id().ToInt(), succ->id().ToInt());
    }
  }

  bool IsFinalMerge(Node* node) {
    return (node->opcode() == IrOpcode::kMerge &&
            node == scheduler_->graph_->end()->InputAt(0));
  }

  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
    return entry != exit && entry_class == exit_class;
  }

  void ResetDataStructures() {
    control_.clear();
    DCHECK(queue_.empty());
    DCHECK(control_.empty());
  }

  Scheduler* scheduler_;
  Schedule* schedule_;
  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
  NodeVector control_;           // List of encountered control nodes.
  Node* component_entry_;        // Component single-entry node.
  BasicBlock* component_start_;  // Component single-entry block.
  BasicBlock* component_end_;    // Component single-exit block.
};


void Scheduler::BuildCFG() {
  Trace("--- CREATING CFG -------------------------------------------\n");

  // Instantiate a new control equivalence algorithm for the graph.
  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);

  // Build a control-flow graph for the main control-connected component that
  // is being spanned by the graph's start and end nodes.
  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
  control_flow_builder_->Run();

  // Initialize per-block data.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
}


// -----------------------------------------------------------------------------
// Phase 2: Compute special RPO and dominator tree.


// Compute the special reverse-post-order block ordering, which is essentially
// a RPO of the graph where loop bodies are contiguous. Properties:
// 1. If block A is a predecessor of B, then A appears before B in the order,
//    unless B is a loop header and A is in the loop headed at B
//    (i.e. A -> B is a backedge).
// => If block A dominates block B, then A appears before B in the order.
// => If block A is a loop header, A appears before all blocks in the loop
//    headed at A.
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
//    do not belong to the loop.)
// Note that a simple RPO traversal satisfies (1) but not (2).
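//
// For example, in the graph
//
//   B0 --> B1 --> B2 --> B4
//           ^      |
//           |      v
//           +----- B3
//
// where B3 -> B1 is the backedge of the loop {B1, B2, B3} and B2 also exits
// to B4, a plain RPO may produce B0 B1 B2 B4 B3 (B4 interleaved inside the
// loop's range), whereas the special RPO keeps the loop contiguous:
// B0 B1 B2 B3 B4.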
class SpecialRPONumberer : public ZoneObject {
 public:
  SpecialRPONumberer(Zone* zone, Schedule* schedule)
      : zone_(zone),
        schedule_(schedule),
        order_(NULL),
        beyond_end_(NULL),
        loops_(zone),
        backedges_(zone),
        stack_(zone),
        previous_block_count_(0) {}

  // Computes the special reverse-post-order for the main control flow graph,
  // that is for the graph spanned between the schedule's start and end blocks.
  void ComputeSpecialRPO() {
    DCHECK(schedule_->end()->SuccessorCount() == 0);
    DCHECK_EQ(NULL, order_);  // Main order does not exist yet.
    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
  }

  // Computes the special reverse-post-order for a partial control flow graph,
  // that is for the graph spanned between the given {entry} and {end} blocks,
  // then updates the existing ordering with this new information.
  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    DCHECK_NE(NULL, order_);  // Main order to be updated is present.
    ComputeAndInsertSpecialRPO(entry, end);
  }

  // Serialize the previously computed order as a special reverse-post-order
  // numbering for basic blocks into the final schedule.
  void SerializeRPOIntoSchedule() {
    int32_t number = 0;
    for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
      b->set_rpo_number(number++);
      schedule_->rpo_order()->push_back(b);
    }
    BeyondEndSentinel()->set_rpo_number(number);
  }

  // Print and verify the special reverse-post-order.
  void PrintAndVerifySpecialRPO() {
#if DEBUG
    if (FLAG_trace_turbo_scheduler) PrintRPO();
    VerifySpecialRPO();
#endif
  }

 private:
  typedef std::pair<BasicBlock*, size_t> Backedge;

  // Numbering for BasicBlock::rpo_number for this block traversal:
  static const int kBlockOnStack = -2;
  static const int kBlockVisited1 = -3;
  static const int kBlockVisited2 = -4;
  static const int kBlockUnvisited1 = -1;
  static const int kBlockUnvisited2 = kBlockVisited1;

  struct SpecialRPOStackFrame {
    BasicBlock* block;
    size_t index;
  };

  struct LoopInfo {
    BasicBlock* header;
    ZoneList<BasicBlock*>* outgoing;
    BitVector* members;
    LoopInfo* prev;
    BasicBlock* end;
    BasicBlock* start;

    void AddOutgoing(Zone* zone, BasicBlock* block) {
      if (outgoing == NULL) {
        outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
      }
      outgoing->Add(block, zone);
    }
  };

  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
           BasicBlock* child, int unvisited) {
    if (child->rpo_number() == unvisited) {
      stack[depth].block = child;
      stack[depth].index = 0;
      child->set_rpo_number(kBlockOnStack);
      return depth + 1;
    }
    return depth;
  }

  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
    block->set_rpo_next(head);
    return block;
  }

  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }

  // TODO(mstarzinger): We only need this special sentinel because some tests
  // use the schedule's end block in actual control flow (e.g. with end having
  // successors). Once this has been cleaned up we can use the end block here.
  BasicBlock* BeyondEndSentinel() {
    if (beyond_end_ == NULL) {
      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
    }
    return beyond_end_;
  }

  // Compute special RPO for the control flow graph between {entry} and {end},
  // mutating any existing order so that the result is still valid.
  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    // RPO should not have been serialized for this schedule yet.
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));

    // Find correct insertion point within existing order.
    BasicBlock* insertion_point = entry->rpo_next();
    BasicBlock* order = insertion_point;

    // Perform an iterative RPO traversal using an explicit stack,
    // recording backedges that form cycles. O(|B|).
    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
    previous_block_count_ = schedule_->BasicBlockCount();
    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
    int num_loops = static_cast<int>(loops_.size());

    while (stack_depth > 0) {
      int current = stack_depth - 1;
      SpecialRPOStackFrame* frame = &stack_[current];

      if (frame->block != end &&
          frame->index < frame->block->SuccessorCount()) {
        // Process the next successor.
        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
        if (succ->rpo_number() == kBlockVisited1) continue;
        if (succ->rpo_number() == kBlockOnStack) {
          // The successor is on the stack, so this is a backedge (cycle).
          backedges_.push_back(Backedge(frame->block, frame->index - 1));
          if (!HasLoopNumber(succ)) {
            // Assign a new loop number to the header if it doesn't have one.
            SetLoopNumber(succ, num_loops++);
          }
        } else {
          // Push the successor onto the stack.
          DCHECK(succ->rpo_number() == kBlockUnvisited1);
          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
        }
      } else {
        // Finished with all successors; pop the stack and add the block.
        order = PushFront(order, frame->block);
        frame->block->set_rpo_number(kBlockVisited1);
        stack_depth--;
      }
    }

    // If no loops were encountered, then the order we computed was correct.
    if (num_loops > static_cast<int>(loops_.size())) {
      // Otherwise, compute the loop information from the backedges in order
      // to perform a traversal that groups loop bodies together.
      ComputeLoopInfo(stack_, num_loops, &backedges_);

      // Initialize the "loop stack". Note the entry could be a loop header.
      LoopInfo* loop =
          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
      order = insertion_point;

      // Perform an iterative post-order traversal, visiting loop bodies before
      // edges that lead out of loops. Visits each block once, but linking loop
      // sections together is linear in the loop size, so overall it is
      // O(|B| + max(loop_depth) * max(|loop|)).
      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
      while (stack_depth > 0) {
        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
        BasicBlock* block = frame->block;
        BasicBlock* succ = NULL;

        if (block != end && frame->index < block->SuccessorCount()) {
          // Process the next normal successor.
          succ = block->SuccessorAt(frame->index++);
        } else if (HasLoopNumber(block)) {
          // Process additional outgoing edges from the loop header.
          if (block->rpo_number() == kBlockOnStack) {
            // Finish the loop body the first time the header is left on the
            // stack.
            DCHECK(loop != NULL && loop->header == block);
            loop->start = PushFront(order, block);
            order = loop->end;
            block->set_rpo_number(kBlockVisited2);
            // Pop the loop stack and continue visiting outgoing edges within
            // the context of the outer loop, if any.
            loop = loop->prev;
            // We leave the loop header on the stack; the rest of this iteration
            // and later iterations will go through its outgoing edges list.
          }

          // Use the next outgoing edge if there are any.
          int outgoing_index =
              static_cast<int>(frame->index - block->SuccessorCount());
          LoopInfo* info = &loops_[GetLoopNumber(block)];
          DCHECK(loop != info);
          if (block != entry && info->outgoing != NULL &&
              outgoing_index < info->outgoing->length()) {
            succ = info->outgoing->at(outgoing_index);
            frame->index++;
          }
        }

        if (succ != NULL) {
          // Process the next successor.
          if (succ->rpo_number() == kBlockOnStack) continue;
          if (succ->rpo_number() == kBlockVisited2) continue;
          DCHECK(succ->rpo_number() == kBlockUnvisited2);
          if (loop != NULL && !loop->members->Contains(succ->id().ToInt())) {
            // The successor is not in the current loop or any nested loop.
            // Add it to the outgoing edges of this loop and visit it later.
            loop->AddOutgoing(zone_, succ);
          } else {
            // Push the successor onto the stack.
            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
            if (HasLoopNumber(succ)) {
              // Push the inner loop onto the loop stack.
              DCHECK(GetLoopNumber(succ) < num_loops);
              LoopInfo* next = &loops_[GetLoopNumber(succ)];
              next->end = order;
              next->prev = loop;
              loop = next;
            }
          }
        } else {
          // Finished with all successors of the current block.
          if (HasLoopNumber(block)) {
            // If we are going to pop a loop header, then add its entire body.
            LoopInfo* info = &loops_[GetLoopNumber(block)];
            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
              if (b->rpo_next() == info->end) {
                b->set_rpo_next(order);
                info->end = order;
                break;
              }
            }
            order = info->start;
          } else {
            // Pop a single node off the stack and add it to the order.
            order = PushFront(order, block);
            block->set_rpo_number(kBlockVisited2);
          }
          stack_depth--;
        }
      }
    }

    // Publish new order the first time.
    if (order_ == NULL) order_ = order;

    // Compute the correct loop headers and set the correct loop ends.
    LoopInfo* current_loop = NULL;
    BasicBlock* current_header = entry->loop_header();
    int32_t loop_depth = entry->loop_depth();
    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
      BasicBlock* current = b;

      // Reset BasicBlock::rpo_number again.
      current->set_rpo_number(kBlockUnvisited1);

      // Finish the previous loop(s) if we just exited them.
      while (current_header != NULL && current == current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK(current_loop != NULL);
        current_loop = current_loop->prev;
        current_header = current_loop == NULL ? NULL : current_loop->header;
        --loop_depth;
      }
      current->set_loop_header(current_header);

      // Push a new loop onto the stack if this loop is a loop header.
      if (HasLoopNumber(current)) {
        ++loop_depth;
        current_loop = &loops_[GetLoopNumber(current)];
        BasicBlock* end = current_loop->end;
        current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
        current_header = current_loop->header;
        Trace("B%d is a loop header, increment loop depth to %d\n",
              current->id().ToInt(), loop_depth);
      }

      current->set_loop_depth(loop_depth);

      if (current->loop_header() == NULL) {
        Trace("B%d is not in a loop (depth == %d)\n", current->id().ToInt(),
              current->loop_depth());
      } else {
        Trace("B%d has loop header B%d, (depth == %d)\n", current->id().ToInt(),
              current->loop_header()->id().ToInt(), current->loop_depth());
      }
    }
  }

  // Computes loop membership from the backedges of the control flow graph.
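  // Each backedge (M -> H) contributes members by walking predecessors
  // backwards from M and marking every block encountered until the header H
  // is reached.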
  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
                       size_t num_loops, ZoneVector<Backedge>* backedges) {
    // Extend existing loop membership vectors.
    for (LoopInfo& loop : loops_) {
      BitVector* new_members = new (zone_)
          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      new_members->CopyFrom(*loop.members);
      loop.members = new_members;
    }

    // Extend loop information vector.
    loops_.resize(num_loops, LoopInfo());

    // Compute loop membership starting from backedges.
    // O(max(loop_depth) * max(|loop|))
    for (size_t i = 0; i < backedges->size(); i++) {
      BasicBlock* member = backedges->at(i).first;
      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
      size_t loop_num = GetLoopNumber(header);
      if (loops_[loop_num].header == NULL) {
        loops_[loop_num].header = header;
        loops_[loop_num].members = new (zone_)
            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      }

      int queue_length = 0;
      if (member != header) {
        // As long as the header doesn't have a backedge to itself, push the
        // member onto the queue and process its predecessors.
        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
          loops_[loop_num].members->Add(member->id().ToInt());
        }
        queue[queue_length++].block = member;
      }

      // Propagate loop membership backwards. All predecessors of M up to the
      // loop header H are members of the loop too. O(|blocks between M and H|).
      while (queue_length > 0) {
        BasicBlock* block = queue[--queue_length].block;
        for (size_t i = 0; i < block->PredecessorCount(); i++) {
          BasicBlock* pred = block->PredecessorAt(i);
          if (pred != header) {
            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
              loops_[loop_num].members->Add(pred->id().ToInt());
              queue[queue_length++].block = pred;
            }
          }
        }
      }
    }
  }

#if DEBUG
  void PrintRPO() {
    OFStream os(stdout);
    os << "RPO with " << loops_.size() << " loops";
    if (loops_.size() > 0) {
      os << " (";
      for (size_t i = 0; i < loops_.size(); i++) {
        if (i > 0) os << " ";
        os << "B" << loops_[i].header->id();
      }
      os << ")";
    }
    os << ":\n";

    for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
      BasicBlock::Id bid = block->id();
      // TODO(jarin,svenpanne): Add formatting here once we have support for
      // that in streams (we want an equivalent of PrintF("%5d:", x) here).
      os << "  " << block->rpo_number() << ":";
      for (size_t i = 0; i < loops_.size(); i++) {
        bool range = loops_[i].header->LoopContains(block);
        bool membership = loops_[i].header != block && range;
        os << (membership ? " |" : "  ");
        os << (range ? "x" : " ");
      }
      os << "  B" << bid << ": ";
      if (block->loop_end() != NULL) {
        os << " range: [" << block->rpo_number() << ", "
           << block->loop_end()->rpo_number() << ")";
      }
      if (block->loop_header() != NULL) {
        os << " header: B" << block->loop_header()->id();
      }
      if (block->loop_depth() > 0) {
        os << " depth: " << block->loop_depth();
      }
      os << "\n";
    }
  }

  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK(order->size() > 0);
    DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK(header != NULL);
      DCHECK(header->rpo_number() >= 0);
      DCHECK(header->rpo_number() < static_cast<int>(order->size()));
      DCHECK(end != NULL);
      DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
      DCHECK(end->rpo_number() > header->rpo_number());
      DCHECK(header->loop_header() != header);

      // Verify the start ... end list relationship.
      int links = 0;
      BasicBlock* block = loop->start;
      DCHECK_EQ(header, block);
      bool end_found;
      while (true) {
        if (block == NULL || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        // The list should be in same order as the final result.
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK(links > 0);
      DCHECK(links == end->rpo_number() - header->rpo_number());
      DCHECK(end_found);

      // Check loop depth of the header.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != NULL; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check the contiguousness of loops.
      int count = 0;
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        BasicBlock* block = order->at(j);
        DCHECK(block->rpo_number() == j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
          count++;
        }
      }
      DCHECK(links == count);
    }
  }
#endif  // DEBUG

  Zone* zone_;
  Schedule* schedule_;
  BasicBlock* order_;
  BasicBlock* beyond_end_;
  ZoneVector<LoopInfo> loops_;
  ZoneVector<Backedge> backedges_;
  ZoneVector<SpecialRPOStackFrame> stack_;
  size_t previous_block_count_;
};


BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
  SpecialRPONumberer numberer(zone, schedule);
  numberer.ComputeSpecialRPO();
  numberer.SerializeRPOIntoSchedule();
  numberer.PrintAndVerifySpecialRPO();
  return schedule->rpo_order();
}


void Scheduler::ComputeSpecialRPONumbering() {
  Trace("--- COMPUTING SPECIAL RPO ----------------------------------\n");

  // Compute the special reverse-post-order for basic blocks.
  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
  special_rpo_->ComputeSpecialRPO();
}

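// Computes the immediate dominator of every block from {block} onwards in RPO
// order: the nearest common dominator of all of its already-visited
// predecessors.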
void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
  for (/*nop*/; block != NULL; block = block->rpo_next()) {
    BasicBlock::Predecessors::iterator pred = block->predecessors_begin();
    BasicBlock::Predecessors::iterator end = block->predecessors_end();
    DCHECK(pred != end);  // All blocks except start have predecessors.
    BasicBlock* dominator = *pred;
    // For multiple predecessors, walk up the dominator tree until a common
    // dominator is found. Visitation order guarantees that all predecessors
    // except for backwards edges have been visited.
    for (++pred; pred != end; ++pred) {
      // Don't examine backwards edges.
      if ((*pred)->dominator_depth() < 0) continue;
      dominator = GetCommonDominator(dominator, *pred);
    }
    block->set_dominator(dominator);
    block->set_dominator_depth(dominator->dominator_depth() + 1);
    // Propagate "deferredness" of the dominator.
    if (dominator->deferred()) block->set_deferred(true);
    Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
          dominator->id().ToInt(), block->dominator_depth());
  }
}


void Scheduler::GenerateImmediateDominatorTree() {
  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");

  // Seed start block to be the first dominator.
  schedule_->start()->set_dominator_depth(0);

  // Build the block dominator tree resulting from the above seed.
  PropagateImmediateDominators(schedule_->start()->rpo_next());
}


// -----------------------------------------------------------------------------
// Phase 3: Prepare use counts for nodes.


class PrepareUsesVisitor : public NullNodeVisitor {
 public:
  explicit PrepareUsesVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  void Pre(Node* node) {
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      // Fixed nodes are always roots for schedule late.
      scheduler_->schedule_root_nodes_.push_back(node);
      if (!schedule_->IsScheduled(node)) {
        // Make sure root nodes are scheduled in their respective blocks.
        Trace("Scheduling fixed position node #%d:%s\n", node->id(),
              node->op()->mnemonic());
        IrOpcode::Value opcode = node->opcode();
        BasicBlock* block =
            opcode == IrOpcode::kParameter
                ? schedule_->start()
                : schedule_->block(NodeProperties::GetControlInput(node));
        DCHECK(block != NULL);
        schedule_->AddNode(block, node);
      }
    }
  }

  void PostEdge(Node* from, int index, Node* to) {
    // If the edge is from an unscheduled node, then tally it in the use count
    // for all of its inputs. The same criterion will be used in ScheduleLate
    // for decrementing use counts.
    if (!schedule_->IsScheduled(from)) {
      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
      scheduler_->IncrementUnscheduledUseCount(to, index, from);
    }
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::PrepareUses() {
  Trace("--- PREPARE USES -------------------------------------------\n");

  // Count the uses of every node; this is used to ensure that all of a node's
  // uses are scheduled before the node itself.
  PrepareUsesVisitor prepare_uses(this);
  graph_->VisitNodeInputsFromEnd(&prepare_uses);
}


// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes early.

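// Computes the "schedule early" position for every node reachable from the
// fixed roots: the minimum block recorded for a node is the deepest block (in
// the dominator tree) among the positions of its inputs, i.e. the earliest
// block in which the node may legally be placed.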
class ScheduleEarlyNodeVisitor {
 public:
  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}

  // Run the schedule early algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
      queue_.push(*i);
      while (!queue_.empty()) {
        VisitNode(queue_.front());
        queue_.pop();
      }
    }
  }

 private:
  // Visits one node from the queue and propagates its current schedule early
  // position to all uses. This in turn might push more nodes onto the queue.
  void VisitNode(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // Fixed nodes already know their schedule early position.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      data->minimum_block_ = schedule_->block(node);
      Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }

    // No need to propagate unconstrained schedule early positions.
    if (data->minimum_block_ == schedule_->start()) return;

    // Propagate schedule early position.
    DCHECK(data->minimum_block_ != NULL);
    Node::Uses uses = node->uses();
    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
      PropagateMinimumPositionToNode(data->minimum_block_, *i);
    }
  }

  // Propagates {block} as another minimum position into the given {node}. This
  // has the net effect of computing the minimum dominator block of {node} that
  // still post-dominates all inputs to {node} when the queue is processed.
  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // No need to propagate to fixed node, it's guaranteed to be a root.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;

    // Coupled nodes influence schedule early position of their control.
    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
      Node* control = NodeProperties::GetControlInput(node);
      PropagateMinimumPositionToNode(block, control);
    }

    // Propagate new position if it is deeper down the dominator tree than the
    // current. Note that all inputs need to have minimum block position inside
    // the dominator chain of {node}'s minimum block position.
    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
      data->minimum_block_ = block;
      queue_.push(node);
      Trace("Propagating #%d:%s minimum_block = B%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }
  }

#if DEBUG
  bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
    BasicBlock* dominator = scheduler_->GetCommonDominator(b1, b2);
    return dominator == b1 || dominator == b2;
  }
#endif

  Scheduler* scheduler_;
  Schedule* schedule_;
  ZoneQueue<Node*> queue_;
};


void Scheduler::ScheduleEarly() {
  Trace("--- SCHEDULE EARLY -----------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    Trace("roots: ");
    for (Node* node : schedule_root_nodes_) {
      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    Trace("\n");
  }

  // Compute the minimum block for each node, thereby determining the earliest
  // position each node could be placed within a valid schedule.
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&schedule_root_nodes_);
}


// -----------------------------------------------------------------------------
// Phase 5: Schedule nodes late.

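// Computes the "schedule late" position for every schedulable node: the
// common dominator of all of its uses. Nodes are then hoisted out of loops
// into enclosing pre-headers as long as this does not move them above their
// schedule-early position.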
class ScheduleLateNodeVisitor {
 public:
  ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}

  // Run the schedule late algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (NodeVectorIter i = roots->begin(); i != roots->end(); ++i) {
      ProcessQueue(*i);
    }
  }

 private:
  void ProcessQueue(Node* root) {
    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    for (Node* node : root->inputs()) {
      // Don't schedule coupled nodes on their own.
      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
        node = NodeProperties::GetControlInput(node);
      }

      // Test schedulability condition by looking at unscheduled use count.
      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;

      queue->push(node);
      while (!queue->empty()) {
        VisitNode(queue->front());
        queue->pop();
      }
    }
  }

  // Visits one node from the queue of schedulable nodes and determines its
  // schedule late position. Also hoists nodes out of loops to find a better
  // scheduling position.
  void VisitNode(Node* node) {
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

    // Don't schedule nodes that are already scheduled.
    if (schedule_->IsScheduled(node)) return;
    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

    // Determine the dominating block for all of the uses of this node. It is
    // the latest block that this node can be scheduled in.
    Trace("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    BasicBlock* block = GetCommonDominatorOfUses(node);
    DCHECK_NOT_NULL(block);

    // The schedule early block dominates the schedule late block.
    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    DCHECK_EQ(min_block, scheduler_->GetCommonDominator(block, min_block));
    Trace("Schedule late of #%d:%s is B%d at loop depth %d, minimum = B%d\n",
          node->id(), node->op()->mnemonic(), block->id().ToInt(),
          block->loop_depth(), min_block->id().ToInt());

    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
1288 // early position.
1289 BasicBlock* hoist_block = GetPreHeader(block);
1290 while (hoist_block != NULL &&
1291 hoist_block->dominator_depth() >= min_block->dominator_depth()) {
1292 Trace(" hoisting #%d:%s to block B%d\n", node->id(),
1293 node->op()->mnemonic(), hoist_block->id().ToInt());
1294 DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
1295 block = hoist_block;
1296 hoist_block = GetPreHeader(hoist_block);
1297 }
1298
1299 // Schedule the node or a floating control structure.
1300 if (NodeProperties::IsControl(node)) {
1301 ScheduleFloatingControl(block, node);
1302 } else {
1303 ScheduleNode(block, node);
1304 }
1305 }
1306
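  // Returns the pre-header of the loop containing {block}: the dominator of
  // the (innermost) loop header, or NULL if {block} is not inside any loop.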
  BasicBlock* GetPreHeader(BasicBlock* block) {
    if (block->IsLoopHeader()) {
      return block->dominator();
    } else if (block->loop_header() != NULL) {
      return block->loop_header()->dominator();
    } else {
      return NULL;
    }
  }

  BasicBlock* GetCommonDominatorOfUses(Node* node) {
    BasicBlock* block = NULL;
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      block = block == NULL ? use_block : use_block == NULL
                                              ? block
                                              : scheduler_->GetCommonDominator(
                                                    block, use_block);
    }
    return block;
  }

  BasicBlock* GetBlockForUse(Edge edge) {
    Node* use = edge.from();
    IrOpcode::Value opcode = use->opcode();
    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
      // If the use is from a coupled (i.e. floating) phi, compute the common
      // dominator of its uses. This will not recurse more than one level.
      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
        Trace("  inspecting uses of coupled #%d:%s\n", use->id(),
              use->op()->mnemonic());
        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
        return GetCommonDominatorOfUses(use);
      }
      // If the use is from a fixed (i.e. non-floating) phi, use the block
      // of the corresponding control input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        Trace("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        opcode = merge->opcode();
        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
        use = NodeProperties::GetControlInput(merge, edge.index());
      }
    }
    BasicBlock* result = schedule_->block(use);
    if (result == NULL) return NULL;
    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }

  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    scheduler_->FuseFloatingControl(block, node);
  }

  void ScheduleNode(BasicBlock* block, Node* node) {
    schedule_->PlanNode(block, node);
    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
  }

  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::ScheduleLate() {
  Trace("--- SCHEDULE LATE ------------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    Trace("roots: ");
    for (Node* node : schedule_root_nodes_) {
      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    Trace("\n");
  }

  // Schedule: Places nodes in the dominator block of all their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
  schedule_late_visitor.Run(&schedule_root_nodes_);
}


// -----------------------------------------------------------------------------
// Phase 6: Seal the final schedule.


void Scheduler::SealFinalSchedule() {
  Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");

  // Serialize the assembly order and reverse-post-order numbering.
  special_rpo_->SerializeRPOIntoSchedule();
  special_rpo_->PrintAndVerifySpecialRPO();

  // Add collected nodes for basic blocks to their blocks in the right order.
  int block_num = 0;
  for (NodeVector& nodes : scheduled_nodes_) {
    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    BasicBlock* block = schedule_->GetBlockById(id);
    for (NodeVectorRIter i = nodes.rbegin(); i != nodes.rend(); ++i) {
      schedule_->AddNode(block, *i);
    }
  }
}


// -----------------------------------------------------------------------------


void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
  Trace("--- FUSE FLOATING CONTROL ----------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule before control flow fusion:\n" << *schedule_;
  }

  // Iterate on phase 1: Build control-flow graph.
  control_flow_builder_->Run(block, node);

  // Iterate on phase 2: Compute special RPO and dominator tree.
  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
  // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
  for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
    b->set_dominator_depth(-1);
    b->set_dominator(NULL);
  }
  PropagateImmediateDominators(block->rpo_next());

  // Iterate on phase 4: Schedule nodes early.
  // TODO(mstarzinger): The following loop gathering the propagation roots is a
  // temporary solution and should be merged into the rest of the scheduler as
  // soon as the approach settled for all floating loops.
  NodeVector propagation_roots(control_flow_builder_->control_);
  for (Node* node : control_flow_builder_->control_) {
    for (Node* use : node->uses()) {
      if (use->opcode() == IrOpcode::kPhi ||
          use->opcode() == IrOpcode::kEffectPhi) {
        propagation_roots.push_back(use);
      }
    }
  }
  if (FLAG_trace_turbo_scheduler) {
    Trace("propagation roots: ");
    for (Node* node : propagation_roots) {
      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    Trace("\n");
  }
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&propagation_roots);

  // Move previously planned nodes.
  // TODO(mstarzinger): Improve that by supporting bulk moves.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
  MovePlannedNodes(block, schedule_->block(node));

  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule after control flow fusion:\n" << *schedule_;
  }
}


void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
  Trace("Move planned nodes from B%d to B%d\n", from->id().ToInt(),
        to->id().ToInt());
  NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
  for (NodeVectorIter i = nodes->begin(); i != nodes->end(); ++i) {
    schedule_->SetBlockForNode(to, *i);
    scheduled_nodes_[to->id().ToSize()].push_back(*i);
  }
  nodes->clear();
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8