// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/scheduler.h"

#include <iomanip>

#include "src/base/adapters.h"
#include "src/bit-vector.h"
#include "src/compiler/common-operator.h"
#include "src/compiler/control-equivalence.h"
#include "src/compiler/graph.h"
#include "src/compiler/node.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/zone-containers.h"

namespace v8 {
namespace internal {
namespace compiler {

#define TRACE(...)                                       \
  do {                                                   \
    if (FLAG_trace_turbo_scheduler) PrintF(__VA_ARGS__); \
  } while (false)

Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule, Flags flags)
    : zone_(zone),
      graph_(graph),
      schedule_(schedule),
      flags_(flags),
      scheduled_nodes_(zone),
      schedule_root_nodes_(zone),
      schedule_queue_(zone),
      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}


Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph, Flags flags) {
  Schedule* schedule = new (graph->zone())
      Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
  Scheduler scheduler(zone, graph, schedule, flags);

  scheduler.BuildCFG();
  scheduler.ComputeSpecialRPONumbering();
  scheduler.GenerateImmediateDominatorTree();

  scheduler.PrepareUses();
  scheduler.ScheduleEarly();
  scheduler.ScheduleLate();

  scheduler.SealFinalSchedule();

  return schedule;
}
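
// Usage sketch (illustrative only, not part of this file's interface): a
// pipeline phase would typically hand the scheduler a temporary zone and the
// finished graph, e.g.
//
//   Schedule* schedule =
//       Scheduler::ComputeSchedule(temp_zone, graph, Scheduler::kNoFlags);
//
// where {temp_zone} and {graph} are assumed to come from the surrounding
// compilation job. Note that the Schedule itself is allocated in the graph's
// zone above, so it outlives the temporary zone used during scheduling.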


Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
  SchedulerData def = {schedule_->start(), 0, kUnknown};
  return def;
}


Scheduler::SchedulerData* Scheduler::GetData(Node* node) {
  return &node_data_[node->id()];
}


Scheduler::Placement Scheduler::GetPlacement(Node* node) {
  SchedulerData* data = GetData(node);
  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
      case IrOpcode::kOsrValue:
        // Parameters and OSR values are always fixed to the start block.
        data->placement_ = kFixed;
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are fixed if their control inputs are, whereas
        // otherwise they are coupled to a floating control node.
        Placement p = GetPlacement(NodeProperties::GetControlInput(node));
        data->placement_ = (p == kFixed ? kFixed : kCoupled);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes that were not control-reachable from end may float.
        data->placement_ = kSchedulable;
        break;
      }
      default:
        data->placement_ = kSchedulable;
        break;
    }
  }
  return data->placement_;
}


void Scheduler::UpdatePlacement(Node* node, Placement placement) {
  SchedulerData* data = GetData(node);
  if (data->placement_ != kUnknown) {  // Trap on mutation, not initialization.
    switch (node->opcode()) {
      case IrOpcode::kParameter:
        // Parameters are fixed once and for all.
        UNREACHABLE();
        break;
      case IrOpcode::kPhi:
      case IrOpcode::kEffectPhi: {
        // Phis and effect phis are coupled to their respective blocks.
        DCHECK_EQ(Scheduler::kCoupled, data->placement_);
        DCHECK_EQ(Scheduler::kFixed, placement);
        Node* control = NodeProperties::GetControlInput(node);
        BasicBlock* block = schedule_->block(control);
        schedule_->AddNode(block, node);
        break;
      }
#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
#undef DEFINE_CONTROL_CASE
      {
        // Control nodes force coupled uses to be placed.
        for (auto use : node->uses()) {
          if (GetPlacement(use) == Scheduler::kCoupled) {
            DCHECK_EQ(node, NodeProperties::GetControlInput(use));
            UpdatePlacement(use, placement);
          }
        }
        break;
      }
      default:
        DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
        DCHECK_EQ(Scheduler::kScheduled, placement);
        break;
    }
    // Reduce the use count of the node's inputs to potentially make them
    // schedulable. If all the uses of a node have been scheduled, then the node
    // itself can be scheduled.
    for (Edge const edge : node->input_edges()) {
      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
    }
  }
  data->placement_ = placement;
}


bool Scheduler::IsCoupledControlEdge(Node* node, int index) {
  return GetPlacement(node) == kCoupled &&
         NodeProperties::FirstControlIndex(node) == index;
}


void Scheduler::IncrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return IncrementUnscheduledUseCount(control, index, from);
  }

  ++(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
}


void Scheduler::DecrementUnscheduledUseCount(Node* node, int index,
                                             Node* from) {
  // Make sure that control edges from coupled nodes are not counted.
  if (IsCoupledControlEdge(from, index)) return;

  // Tracking use counts for fixed nodes is useless.
  if (GetPlacement(node) == kFixed) return;

  // Use count for coupled nodes is summed up on their control.
  if (GetPlacement(node) == kCoupled) {
    Node* control = NodeProperties::GetControlInput(node);
    return DecrementUnscheduledUseCount(control, index, from);
  }

  DCHECK(GetData(node)->unscheduled_count_ > 0);
  --(GetData(node)->unscheduled_count_);
  if (FLAG_trace_turbo_scheduler) {
    TRACE("  Use count of #%d:%s (used by #%d:%s)-- = %d\n", node->id(),
          node->op()->mnemonic(), from->id(), from->op()->mnemonic(),
          GetData(node)->unscheduled_count_);
  }
  if (GetData(node)->unscheduled_count_ == 0) {
    TRACE("  newly eligible #%d:%s\n", node->id(), node->op()->mnemonic());
    schedule_queue_.push(node);
  }
}


// -----------------------------------------------------------------------------
// Phase 1: Build control-flow graph.


// Internal class to build a control flow graph (i.e. the basic blocks and
// edges between them within a Schedule) from the node graph. Visits control
// edges of the graph backwards from an end node in order to find the connected
// control subgraph, needed for scheduling.
class CFGBuilder : public ZoneObject {
 public:
  CFGBuilder(Zone* zone, Scheduler* scheduler)
      : zone_(zone),
        scheduler_(scheduler),
        schedule_(scheduler->schedule_),
        queued_(scheduler->graph_, 2),
        queue_(zone),
        control_(zone),
        component_entry_(nullptr),
        component_start_(nullptr),
        component_end_(nullptr) {}

  // Run the control flow graph construction algorithm by walking the graph
  // backwards from end through control edges, building and connecting the
  // basic blocks for control nodes.
  void Run() {
    ResetDataStructures();
    Queue(scheduler_->graph_->end());

    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();
      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }

  // Run the control flow graph construction for a minimal control-connected
  // component ending in {exit} and merge that component into an existing
  // control flow graph at the bottom of {block}.
  void Run(BasicBlock* block, Node* exit) {
    ResetDataStructures();
    Queue(exit);

    component_entry_ = nullptr;
    component_start_ = block;
    component_end_ = schedule_->block(exit);
    scheduler_->equivalence_->Run(exit);
    while (!queue_.empty()) {  // Breadth-first backwards traversal.
      Node* node = queue_.front();
      queue_.pop();

      // Use control dependence equivalence to find a canonical single-entry
      // single-exit region that makes up a minimal component to be scheduled.
      if (IsSingleEntrySingleExitRegion(node, exit)) {
        TRACE("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
        DCHECK(!component_entry_);
        component_entry_ = node;
        continue;
      }

      int max = NodeProperties::PastControlIndex(node);
      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
        Queue(node->InputAt(i));
      }
    }
    DCHECK(component_entry_);

    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
    }
  }

 private:
  friend class ScheduleLateNodeVisitor;
  friend class Scheduler;

  void FixNode(BasicBlock* block, Node* node) {
    schedule_->AddNode(block, node);
    scheduler_->UpdatePlacement(node, Scheduler::kFixed);
  }

  void Queue(Node* node) {
    // Mark the connected control nodes as they are queued.
    if (!queued_.Get(node)) {
      BuildBlocks(node);
      queue_.push(node);
      queued_.Set(node, true);
      control_.push_back(node);
    }
  }

  void BuildBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kEnd:
        FixNode(schedule_->end(), node);
        break;
      case IrOpcode::kStart:
        FixNode(schedule_->start(), node);
        break;
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        BuildBlockForNode(node);
        break;
      case IrOpcode::kTerminate: {
        // Put Terminate in the loop to which it refers.
        Node* loop = NodeProperties::GetControlInput(node);
        BasicBlock* block = BuildBlockForNode(loop);
        FixNode(block, node);
        break;
      }
      case IrOpcode::kBranch:
      case IrOpcode::kSwitch:
        BuildBlocksForSuccessors(node);
        break;
      case IrOpcode::kCall:
        if (NodeProperties::IsExceptionalCall(node)) {
          BuildBlocksForSuccessors(node);
        }
        break;
      default:
        break;
    }
  }

  void ConnectBlocks(Node* node) {
    switch (node->opcode()) {
      case IrOpcode::kLoop:
      case IrOpcode::kMerge:
        ConnectMerge(node);
        break;
      case IrOpcode::kBranch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectBranch(node);
        break;
      case IrOpcode::kSwitch:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectSwitch(node);
        break;
      case IrOpcode::kDeoptimize:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectDeoptimize(node);
        break;
      case IrOpcode::kTailCall:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectTailCall(node);
        break;
      case IrOpcode::kReturn:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectReturn(node);
        break;
      case IrOpcode::kThrow:
        scheduler_->UpdatePlacement(node, Scheduler::kFixed);
        ConnectThrow(node);
        break;
      case IrOpcode::kCall:
        if (NodeProperties::IsExceptionalCall(node)) {
          scheduler_->UpdatePlacement(node, Scheduler::kFixed);
          ConnectCall(node);
        }
        break;
      default:
        break;
    }
  }

  BasicBlock* BuildBlockForNode(Node* node) {
    BasicBlock* block = schedule_->block(node);
    if (block == nullptr) {
      block = schedule_->NewBasicBlock();
      TRACE("Create block id:%d for #%d:%s\n", block->id().ToInt(), node->id(),
            node->op()->mnemonic());
      FixNode(block, node);
    }
    return block;
  }

  void BuildBlocksForSuccessors(Node* node) {
    size_t const successor_cnt = node->op()->ControlOutputCount();
    Node** successors = zone_->NewArray<Node*>(successor_cnt);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      BuildBlockForNode(successors[index]);
    }
  }

  void CollectSuccessorBlocks(Node* node, BasicBlock** successor_blocks,
                              size_t successor_cnt) {
    Node** successors = reinterpret_cast<Node**>(successor_blocks);
    NodeProperties::CollectControlProjections(node, successors, successor_cnt);
    for (size_t index = 0; index < successor_cnt; ++index) {
      successor_blocks[index] = schedule_->block(successors[index]);
    }
  }

  BasicBlock* FindPredecessorBlock(Node* node) {
    BasicBlock* predecessor_block = nullptr;
    while (true) {
      predecessor_block = schedule_->block(node);
      if (predecessor_block != nullptr) break;
      node = NodeProperties::GetControlInput(node);
    }
    return predecessor_block;
  }

  void ConnectCall(Node* call) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(call, successor_blocks, arraysize(successor_blocks));

    // Consider the exception continuation to be deferred.
    successor_blocks[1]->set_deferred(true);

    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, successor_blocks[0]);
    TraceConnect(call, call_block, successor_blocks[1]);
    schedule_->AddCall(call_block, call, successor_blocks[0],
                       successor_blocks[1]);
  }

  void ConnectBranch(Node* branch) {
    BasicBlock* successor_blocks[2];
    CollectSuccessorBlocks(branch, successor_blocks,
                           arraysize(successor_blocks));

    // Consider branch hints.
    switch (BranchHintOf(branch->op())) {
      case BranchHint::kNone:
        break;
      case BranchHint::kTrue:
        successor_blocks[1]->set_deferred(true);
        break;
      case BranchHint::kFalse:
        successor_blocks[0]->set_deferred(true);
        break;
    }

    if (branch == component_entry_) {
      TraceConnect(branch, component_start_, successor_blocks[0]);
      TraceConnect(branch, component_start_, successor_blocks[1]);
      schedule_->InsertBranch(component_start_, component_end_, branch,
                              successor_blocks[0], successor_blocks[1]);
    } else {
      Node* branch_control = NodeProperties::GetControlInput(branch);
      BasicBlock* branch_block = FindPredecessorBlock(branch_control);
      TraceConnect(branch, branch_block, successor_blocks[0]);
      TraceConnect(branch, branch_block, successor_blocks[1]);
      schedule_->AddBranch(branch_block, branch, successor_blocks[0],
                           successor_blocks[1]);
    }
  }

  void ConnectSwitch(Node* sw) {
    size_t const successor_count = sw->op()->ControlOutputCount();
    BasicBlock** successor_blocks =
        zone_->NewArray<BasicBlock*>(successor_count);
    CollectSuccessorBlocks(sw, successor_blocks, successor_count);

    if (sw == component_entry_) {
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, component_start_, successor_blocks[index]);
      }
      schedule_->InsertSwitch(component_start_, component_end_, sw,
                              successor_blocks, successor_count);
    } else {
      Node* switch_control = NodeProperties::GetControlInput(sw);
      BasicBlock* switch_block = FindPredecessorBlock(switch_control);
      for (size_t index = 0; index < successor_count; ++index) {
        TraceConnect(sw, switch_block, successor_blocks[index]);
      }
      schedule_->AddSwitch(switch_block, sw, successor_blocks, successor_count);
    }
  }

  void ConnectMerge(Node* merge) {
    // Don't connect the special merge at the end to its predecessors.
    if (IsFinalMerge(merge)) return;

    BasicBlock* block = schedule_->block(merge);
    DCHECK_NOT_NULL(block);
    // For all of the merge's control inputs, add a goto at the end to the
    // merge's basic block.
    for (Node* const input : merge->inputs()) {
      BasicBlock* predecessor_block = FindPredecessorBlock(input);
      TraceConnect(merge, predecessor_block, block);
      schedule_->AddGoto(predecessor_block, block);
    }
  }

  void ConnectTailCall(Node* call) {
    Node* call_control = NodeProperties::GetControlInput(call);
    BasicBlock* call_block = FindPredecessorBlock(call_control);
    TraceConnect(call, call_block, nullptr);
    schedule_->AddTailCall(call_block, call);
  }

  void ConnectReturn(Node* ret) {
    Node* return_control = NodeProperties::GetControlInput(ret);
    BasicBlock* return_block = FindPredecessorBlock(return_control);
    TraceConnect(ret, return_block, nullptr);
    schedule_->AddReturn(return_block, ret);
  }

  void ConnectDeoptimize(Node* deopt) {
    Node* deoptimize_control = NodeProperties::GetControlInput(deopt);
    BasicBlock* deoptimize_block = FindPredecessorBlock(deoptimize_control);
    TraceConnect(deopt, deoptimize_block, nullptr);
    schedule_->AddDeoptimize(deoptimize_block, deopt);
  }

  void ConnectThrow(Node* thr) {
    Node* throw_control = NodeProperties::GetControlInput(thr);
    BasicBlock* throw_block = FindPredecessorBlock(throw_control);
    TraceConnect(thr, throw_block, nullptr);
    schedule_->AddThrow(throw_block, thr);
  }

  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
    DCHECK_NOT_NULL(block);
    if (succ == nullptr) {
      TRACE("Connect #%d:%s, id:%d -> end\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt());
    } else {
      TRACE("Connect #%d:%s, id:%d -> id:%d\n", node->id(),
            node->op()->mnemonic(), block->id().ToInt(), succ->id().ToInt());
    }
  }

  bool IsFinalMerge(Node* node) {
    return (node->opcode() == IrOpcode::kMerge &&
            node == scheduler_->graph_->end()->InputAt(0));
  }

  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
    return entry != exit && entry_class == exit_class;
  }

  void ResetDataStructures() {
    control_.clear();
    DCHECK(queue_.empty());
    DCHECK(control_.empty());
  }

  Zone* zone_;
  Scheduler* scheduler_;
  Schedule* schedule_;
  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
  NodeVector control_;           // List of encountered control nodes.
  Node* component_entry_;        // Component single-entry node.
  BasicBlock* component_start_;  // Component single-entry block.
  BasicBlock* component_end_;    // Component single-exit block.
};


void Scheduler::BuildCFG() {
  TRACE("--- CREATING CFG -------------------------------------------\n");

  // Instantiate a new control equivalence algorithm for the graph.
  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);

  // Build a control-flow graph for the main control-connected component that
  // is spanned by the graph's start and end nodes.
  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
  control_flow_builder_->Run();

  // Initialize per-block data.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
}


// -----------------------------------------------------------------------------
// Phase 2: Compute special RPO and dominator tree.


// Compute the special reverse-post-order block ordering, which is essentially
// an RPO of the graph where loop bodies are contiguous. Properties:
// 1. If block A is a predecessor of B, then A appears before B in the order,
//    unless B is a loop header and A is in the loop headed at B
//    (i.e. A -> B is a backedge).
// => If block A dominates block B, then A appears before B in the order.
// => If block A is a loop header, A appears before all blocks in the loop
//    headed at A.
// 2. All loops are contiguous in the order (i.e. no intervening blocks that
//    do not belong to the loop.)
// Note a simple RPO traversal satisfies (1) but not (2).
class SpecialRPONumberer : public ZoneObject {
 public:
  SpecialRPONumberer(Zone* zone, Schedule* schedule)
      : zone_(zone),
        schedule_(schedule),
        order_(nullptr),
        beyond_end_(nullptr),
        loops_(zone),
        backedges_(zone),
        stack_(zone),
        previous_block_count_(0),
        empty_(0, zone) {}

  // Computes the special reverse-post-order for the main control flow graph,
  // that is for the graph spanned between the schedule's start and end blocks.
  void ComputeSpecialRPO() {
    DCHECK(schedule_->end()->SuccessorCount() == 0);
    DCHECK(!order_);  // Main order does not exist yet.
    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
  }

  // Computes the special reverse-post-order for a partial control flow graph,
  // that is for the graph spanned between the given {entry} and {end} blocks,
  // then updates the existing ordering with this new information.
  void UpdateSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    DCHECK(order_);  // Main order to be updated is present.
    ComputeAndInsertSpecialRPO(entry, end);
  }

  // Serialize the previously computed order as a special reverse-post-order
  // numbering for basic blocks into the final schedule.
  void SerializeRPOIntoSchedule() {
    int32_t number = 0;
    for (BasicBlock* b = order_; b != nullptr; b = b->rpo_next()) {
      b->set_rpo_number(number++);
      schedule_->rpo_order()->push_back(b);
    }
    BeyondEndSentinel()->set_rpo_number(number);
  }

  // Print and verify the special reverse-post-order.
  void PrintAndVerifySpecialRPO() {
#if DEBUG
    if (FLAG_trace_turbo_scheduler) PrintRPO();
    VerifySpecialRPO();
#endif
  }

  const ZoneVector<BasicBlock*>& GetOutgoingBlocks(BasicBlock* block) {
    if (HasLoopNumber(block)) {
      LoopInfo const& loop = loops_[GetLoopNumber(block)];
      if (loop.outgoing) return *loop.outgoing;
    }
    return empty_;
  }

 private:
  typedef std::pair<BasicBlock*, size_t> Backedge;

  // Numbering for BasicBlock::rpo_number for this block traversal:
  static const int kBlockOnStack = -2;
  static const int kBlockVisited1 = -3;
  static const int kBlockVisited2 = -4;
  static const int kBlockUnvisited1 = -1;
  static const int kBlockUnvisited2 = kBlockVisited1;

  struct SpecialRPOStackFrame {
    BasicBlock* block;
    size_t index;
  };

  struct LoopInfo {
    BasicBlock* header;
    ZoneVector<BasicBlock*>* outgoing;
    BitVector* members;
    LoopInfo* prev;
    BasicBlock* end;
    BasicBlock* start;

    void AddOutgoing(Zone* zone, BasicBlock* block) {
      if (outgoing == nullptr) {
        outgoing = new (zone->New(sizeof(ZoneVector<BasicBlock*>)))
            ZoneVector<BasicBlock*>(zone);
      }
      outgoing->push_back(block);
    }
  };

  int Push(ZoneVector<SpecialRPOStackFrame>& stack, int depth,
           BasicBlock* child, int unvisited) {
    if (child->rpo_number() == unvisited) {
      stack[depth].block = child;
      stack[depth].index = 0;
      child->set_rpo_number(kBlockOnStack);
      return depth + 1;
    }
    return depth;
  }

  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
    block->set_rpo_next(head);
    return block;
  }

  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
  static void SetLoopNumber(BasicBlock* block, int loop_number) {
    return block->set_loop_number(loop_number);
  }
  static bool HasLoopNumber(BasicBlock* block) {
    return block->loop_number() >= 0;
  }

  // TODO(mstarzinger): We only need this special sentinel because some tests
  // use the schedule's end block in actual control flow (e.g. with end having
  // successors). Once this has been cleaned up we can use the end block here.
  BasicBlock* BeyondEndSentinel() {
    if (beyond_end_ == nullptr) {
      BasicBlock::Id id = BasicBlock::Id::FromInt(-1);
      beyond_end_ = new (schedule_->zone()) BasicBlock(schedule_->zone(), id);
    }
    return beyond_end_;
  }

  // Compute special RPO for the control flow graph between {entry} and {end},
  // mutating any existing order so that the result is still valid.
  void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
    // RPO should not have been serialized for this schedule yet.
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
    CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
    CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));

    // Find correct insertion point within existing order.
    BasicBlock* insertion_point = entry->rpo_next();
    BasicBlock* order = insertion_point;

    // Perform an iterative RPO traversal using an explicit stack,
    // recording backedges that form cycles. O(|B|).
    DCHECK_LT(previous_block_count_, schedule_->BasicBlockCount());
    stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
    previous_block_count_ = schedule_->BasicBlockCount();
    int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
    int num_loops = static_cast<int>(loops_.size());

    while (stack_depth > 0) {
      int current = stack_depth - 1;
      SpecialRPOStackFrame* frame = &stack_[current];

      if (frame->block != end &&
          frame->index < frame->block->SuccessorCount()) {
        // Process the next successor.
        BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
        if (succ->rpo_number() == kBlockVisited1) continue;
        if (succ->rpo_number() == kBlockOnStack) {
          // The successor is on the stack, so this is a backedge (cycle).
          backedges_.push_back(Backedge(frame->block, frame->index - 1));
          if (!HasLoopNumber(succ)) {
            // Assign a new loop number to the header if it doesn't have one.
            SetLoopNumber(succ, num_loops++);
          }
        } else {
          // Push the successor onto the stack.
          DCHECK(succ->rpo_number() == kBlockUnvisited1);
          stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited1);
        }
      } else {
        // Finished with all successors; pop the stack and add the block.
        order = PushFront(order, frame->block);
        frame->block->set_rpo_number(kBlockVisited1);
        stack_depth--;
      }
    }

    // If no loops were encountered, then the order we computed was correct.
    if (num_loops > static_cast<int>(loops_.size())) {
      // Otherwise, compute the loop information from the backedges in order
      // to perform a traversal that groups loop bodies together.
      ComputeLoopInfo(stack_, num_loops, &backedges_);

      // Initialize the "loop stack". Note the entry could be a loop header.
      LoopInfo* loop =
          HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : nullptr;
      order = insertion_point;

      // Perform an iterative post-order traversal, visiting loop bodies before
      // edges that lead out of loops. Visits each block once, but linking loop
      // sections together is linear in the loop size, so overall is
      // O(|B| + max(loop_depth) * max(|loop|))
      stack_depth = Push(stack_, 0, entry, kBlockUnvisited2);
      while (stack_depth > 0) {
        SpecialRPOStackFrame* frame = &stack_[stack_depth - 1];
        BasicBlock* block = frame->block;
        BasicBlock* succ = nullptr;

        if (block != end && frame->index < block->SuccessorCount()) {
          // Process the next normal successor.
          succ = block->SuccessorAt(frame->index++);
        } else if (HasLoopNumber(block)) {
          // Process additional outgoing edges from the loop header.
          if (block->rpo_number() == kBlockOnStack) {
            // Finish the loop body the first time the header is left on the
            // stack.
            DCHECK(loop != nullptr && loop->header == block);
            loop->start = PushFront(order, block);
            order = loop->end;
            block->set_rpo_number(kBlockVisited2);
            // Pop the loop stack and continue visiting outgoing edges within
            // the context of the outer loop, if any.
            loop = loop->prev;
            // We leave the loop header on the stack; the rest of this iteration
            // and later iterations will go through its outgoing edges list.
          }

          // Use the next outgoing edge if there are any.
          size_t outgoing_index = frame->index - block->SuccessorCount();
          LoopInfo* info = &loops_[GetLoopNumber(block)];
          DCHECK(loop != info);
          if (block != entry && info->outgoing != nullptr &&
              outgoing_index < info->outgoing->size()) {
            succ = info->outgoing->at(outgoing_index);
            frame->index++;
          }
        }

        if (succ != nullptr) {
          // Process the next successor.
          if (succ->rpo_number() == kBlockOnStack) continue;
          if (succ->rpo_number() == kBlockVisited2) continue;
          DCHECK(succ->rpo_number() == kBlockUnvisited2);
          if (loop != nullptr && !loop->members->Contains(succ->id().ToInt())) {
            // The successor is not in the current loop or any nested loop.
            // Add it to the outgoing edges of this loop and visit it later.
            loop->AddOutgoing(zone_, succ);
          } else {
            // Push the successor onto the stack.
            stack_depth = Push(stack_, stack_depth, succ, kBlockUnvisited2);
            if (HasLoopNumber(succ)) {
              // Push the inner loop onto the loop stack.
              DCHECK(GetLoopNumber(succ) < num_loops);
              LoopInfo* next = &loops_[GetLoopNumber(succ)];
              next->end = order;
              next->prev = loop;
              loop = next;
            }
          }
        } else {
          // Finished with all successors of the current block.
          if (HasLoopNumber(block)) {
            // If we are going to pop a loop header, then add its entire body.
            LoopInfo* info = &loops_[GetLoopNumber(block)];
            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
              if (b->rpo_next() == info->end) {
                b->set_rpo_next(order);
                info->end = order;
                break;
              }
            }
            order = info->start;
          } else {
            // Pop a single node off the stack and add it to the order.
            order = PushFront(order, block);
            block->set_rpo_number(kBlockVisited2);
          }
          stack_depth--;
        }
      }
    }

    // Publish new order the first time.
    if (order_ == nullptr) order_ = order;

    // Compute the correct loop headers and set the correct loop ends.
    LoopInfo* current_loop = nullptr;
    BasicBlock* current_header = entry->loop_header();
    int32_t loop_depth = entry->loop_depth();
    if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
      BasicBlock* current = b;

      // Reset BasicBlock::rpo_number again.
      current->set_rpo_number(kBlockUnvisited1);

      // Finish the previous loop(s) if we just exited them.
      while (current_header != nullptr &&
             current == current_header->loop_end()) {
        DCHECK(current_header->IsLoopHeader());
        DCHECK_NOT_NULL(current_loop);
        current_loop = current_loop->prev;
        current_header =
            current_loop == nullptr ? nullptr : current_loop->header;
        --loop_depth;
      }
      current->set_loop_header(current_header);

      // Push a new loop onto the stack if this loop is a loop header.
      if (HasLoopNumber(current)) {
        ++loop_depth;
        current_loop = &loops_[GetLoopNumber(current)];
        BasicBlock* end = current_loop->end;
        current->set_loop_end(end == nullptr ? BeyondEndSentinel() : end);
        current_header = current_loop->header;
        TRACE("id:%d is a loop header, increment loop depth to %d\n",
              current->id().ToInt(), loop_depth);
      }

      current->set_loop_depth(loop_depth);

      if (current->loop_header() == nullptr) {
        TRACE("id:%d is not in a loop (depth == %d)\n", current->id().ToInt(),
              current->loop_depth());
      } else {
        TRACE("id:%d has loop header id:%d, (depth == %d)\n",
              current->id().ToInt(), current->loop_header()->id().ToInt(),
              current->loop_depth());
      }
    }
  }

  // Computes loop membership from the backedges of the control flow graph.
  void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
                       size_t num_loops, ZoneVector<Backedge>* backedges) {
    // Extend existing loop membership vectors.
    for (LoopInfo& loop : loops_) {
      BitVector* new_members = new (zone_)
          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      new_members->CopyFrom(*loop.members);
      loop.members = new_members;
    }

    // Extend loop information vector.
    loops_.resize(num_loops, LoopInfo());

    // Compute loop membership starting from backedges.
    // O(max(loop_depth) * max(|loop|)).
    for (size_t i = 0; i < backedges->size(); i++) {
      BasicBlock* member = backedges->at(i).first;
      BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
      size_t loop_num = GetLoopNumber(header);
      if (loops_[loop_num].header == nullptr) {
        loops_[loop_num].header = header;
        loops_[loop_num].members = new (zone_)
            BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
      }

      int queue_length = 0;
      if (member != header) {
        // As long as the header doesn't have a backedge to itself,
        // push the member onto the queue and process its predecessors.
        if (!loops_[loop_num].members->Contains(member->id().ToInt())) {
          loops_[loop_num].members->Add(member->id().ToInt());
        }
        queue[queue_length++].block = member;
      }

      // Propagate loop membership backwards. All predecessors of M up to the
      // loop header H are members of the loop too. O(|blocks between M and H|).
      while (queue_length > 0) {
        BasicBlock* block = queue[--queue_length].block;
        for (size_t i = 0; i < block->PredecessorCount(); i++) {
          BasicBlock* pred = block->PredecessorAt(i);
          if (pred != header) {
            if (!loops_[loop_num].members->Contains(pred->id().ToInt())) {
              loops_[loop_num].members->Add(pred->id().ToInt());
              queue[queue_length++].block = pred;
            }
          }
        }
      }
    }
  }
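
  // Worked example (hypothetical block numbers): for a backedge B4 -> B1 with
  // predecessor chain B1 <- B2 <- B3 <- B4, the propagation above starts at
  // the backedge source B4, marks B4, then walks predecessors and marks B3
  // and B2, stopping at the header B1. The header itself is identified via
  // GetLoopNumber() rather than via its own members bit vector.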

#if DEBUG
  void PrintRPO() {
    OFStream os(stdout);
    os << "RPO with " << loops_.size() << " loops";
    if (loops_.size() > 0) {
      os << " (";
      for (size_t i = 0; i < loops_.size(); i++) {
        if (i > 0) os << " ";
        os << "id:" << loops_[i].header->id();
      }
      os << ")";
    }
    os << ":\n";

    for (BasicBlock* block = order_; block != nullptr;
         block = block->rpo_next()) {
      os << std::setw(5) << "B" << block->rpo_number() << ":";
      for (size_t i = 0; i < loops_.size(); i++) {
        bool range = loops_[i].header->LoopContains(block);
        bool membership = loops_[i].header != block && range;
        os << (membership ? " |" : "  ");
        os << (range ? "x" : " ");
      }
      os << "  id:" << block->id() << ": ";
      if (block->loop_end() != nullptr) {
        os << " range: [B" << block->rpo_number() << ", B"
           << block->loop_end()->rpo_number() << ")";
      }
      if (block->loop_header() != nullptr) {
        os << " header: id:" << block->loop_header()->id();
      }
      if (block->loop_depth() > 0) {
        os << " depth: " << block->loop_depth();
      }
      os << "\n";
    }
  }

  void VerifySpecialRPO() {
    BasicBlockVector* order = schedule_->rpo_order();
    DCHECK(order->size() > 0);
    DCHECK((*order)[0]->id().ToInt() == 0);  // entry should be first.

    for (size_t i = 0; i < loops_.size(); i++) {
      LoopInfo* loop = &loops_[i];
      BasicBlock* header = loop->header;
      BasicBlock* end = header->loop_end();

      DCHECK_NOT_NULL(header);
      DCHECK(header->rpo_number() >= 0);
      DCHECK(header->rpo_number() < static_cast<int>(order->size()));
      DCHECK_NOT_NULL(end);
      DCHECK(end->rpo_number() <= static_cast<int>(order->size()));
      DCHECK(end->rpo_number() > header->rpo_number());
      DCHECK(header->loop_header() != header);

      // Verify the start ... end list relationship.
      int links = 0;
      BasicBlock* block = loop->start;
      DCHECK_EQ(header, block);
      bool end_found;
      while (true) {
        if (block == nullptr || block == loop->end) {
          end_found = (loop->end == block);
          break;
        }
        // The list should be in same order as the final result.
        DCHECK(block->rpo_number() == links + header->rpo_number());
        links++;
        block = block->rpo_next();
        DCHECK_LT(links, static_cast<int>(2 * order->size()));  // cycle?
      }
      DCHECK(links > 0);
      DCHECK(links == end->rpo_number() - header->rpo_number());
      DCHECK(end_found);

      // Check loop depth of the header.
      int loop_depth = 0;
      for (LoopInfo* outer = loop; outer != nullptr; outer = outer->prev) {
        loop_depth++;
      }
      DCHECK_EQ(loop_depth, header->loop_depth());

      // Check the contiguousness of loops.
      int count = 0;
      for (int j = 0; j < static_cast<int>(order->size()); j++) {
        BasicBlock* block = order->at(j);
        DCHECK(block->rpo_number() == j);
        if (j < header->rpo_number() || j >= end->rpo_number()) {
          DCHECK(!header->LoopContains(block));
        } else {
          DCHECK(header->LoopContains(block));
          DCHECK_GE(block->loop_depth(), loop_depth);
          count++;
        }
      }
      DCHECK(links == count);
    }
  }
#endif  // DEBUG

  Zone* zone_;
  Schedule* schedule_;
  BasicBlock* order_;
  BasicBlock* beyond_end_;
  ZoneVector<LoopInfo> loops_;
  ZoneVector<Backedge> backedges_;
  ZoneVector<SpecialRPOStackFrame> stack_;
  size_t previous_block_count_;
  ZoneVector<BasicBlock*> const empty_;
};


BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
  SpecialRPONumberer numberer(zone, schedule);
  numberer.ComputeSpecialRPO();
  numberer.SerializeRPOIntoSchedule();
  numberer.PrintAndVerifySpecialRPO();
  return schedule->rpo_order();
}


void Scheduler::ComputeSpecialRPONumbering() {
  TRACE("--- COMPUTING SPECIAL RPO ----------------------------------\n");

  // Compute the special reverse-post-order for basic blocks.
  special_rpo_ = new (zone_) SpecialRPONumberer(zone_, schedule_);
  special_rpo_->ComputeSpecialRPO();
}


void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
  for (/*nop*/; block != nullptr; block = block->rpo_next()) {
    auto pred = block->predecessors().begin();
    auto end = block->predecessors().end();
    DCHECK(pred != end);  // All blocks except start have predecessors.
    BasicBlock* dominator = *pred;
    bool deferred = dominator->deferred();
    // For multiple predecessors, walk up the dominator tree until a common
    // dominator is found. Visitation order guarantees that all predecessors
    // except for backwards edges have been visited.
    for (++pred; pred != end; ++pred) {
      // Don't examine backwards edges.
      if ((*pred)->dominator_depth() < 0) continue;
      dominator = BasicBlock::GetCommonDominator(dominator, *pred);
      deferred = deferred & (*pred)->deferred();
    }
    block->set_dominator(dominator);
    block->set_dominator_depth(dominator->dominator_depth() + 1);
    block->set_deferred(deferred | block->deferred());
    TRACE("Block id:%d's idom is id:%d, depth = %d\n", block->id().ToInt(),
          dominator->id().ToInt(), block->dominator_depth());
  }
}
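
// For reference, the common-dominator query used above conceptually walks the
// two blocks up the dominator tree until they meet, using dominator_depth() to
// keep both sides level. A sketch of that standard technique (the
// authoritative version lives in BasicBlock::GetCommonDominator):
//
//   while (b1 != b2) {
//     if (b1->dominator_depth() < b2->dominator_depth()) {
//       b2 = b2->dominator();
//     } else {
//       b1 = b1->dominator();
//     }
//   }
//   return b1;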


void Scheduler::GenerateImmediateDominatorTree() {
  TRACE("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");

  // Seed start block to be the first dominator.
  schedule_->start()->set_dominator_depth(0);

  // Build the block dominator tree resulting from the above seed.
  PropagateImmediateDominators(schedule_->start()->rpo_next());
}


// -----------------------------------------------------------------------------
// Phase 3: Prepare use counts for nodes.


class PrepareUsesVisitor {
 public:
  explicit PrepareUsesVisitor(Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}

  void Pre(Node* node) {
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      // Fixed nodes are always roots for schedule late.
      scheduler_->schedule_root_nodes_.push_back(node);
      if (!schedule_->IsScheduled(node)) {
        // Make sure root nodes are scheduled in their respective blocks.
        TRACE("Scheduling fixed position node #%d:%s\n", node->id(),
              node->op()->mnemonic());
        IrOpcode::Value opcode = node->opcode();
        BasicBlock* block =
            opcode == IrOpcode::kParameter
                ? schedule_->start()
                : schedule_->block(NodeProperties::GetControlInput(node));
        DCHECK_NOT_NULL(block);
        schedule_->AddNode(block, node);
      }
    }
  }

  void PostEdge(Node* from, int index, Node* to) {
    // If the edge is from an unscheduled node, then tally it in the use count
    // for all of its inputs. The same criterion will be used in ScheduleLate
    // for decrementing use counts.
    if (!schedule_->IsScheduled(from)) {
      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
      scheduler_->IncrementUnscheduledUseCount(to, index, from);
    }
  }

 private:
  Scheduler* scheduler_;
  Schedule* schedule_;
};


void Scheduler::PrepareUses() {
  TRACE("--- PREPARE USES -------------------------------------------\n");

  // Count the uses of every node, which is used to ensure that all of a
  // node's uses are scheduled before the node itself.
  PrepareUsesVisitor prepare_uses(this);

  // TODO(turbofan): simplify the careful pre/post ordering here.
  BoolVector visited(graph_->NodeCount(), false, zone_);
  ZoneStack<Node::InputEdges::iterator> stack(zone_);
  Node* node = graph_->end();
  prepare_uses.Pre(node);
  visited[node->id()] = true;
  stack.push(node->input_edges().begin());
  while (!stack.empty()) {
    Edge edge = *stack.top();
    Node* node = edge.to();
    if (visited[node->id()]) {
      prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
      if (++stack.top() == edge.from()->input_edges().end()) stack.pop();
    } else {
      prepare_uses.Pre(node);
      visited[node->id()] = true;
      if (node->InputCount() > 0) stack.push(node->input_edges().begin());
    }
  }
}
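
// The explicit-stack loop in PrepareUses() above is an iteration-safe version
// of the following recursive sketch (shown for clarity only; the real code
// avoids recursion so that very deep graphs cannot overflow the native stack):
//
//   void Traverse(Node* node) {
//     prepare_uses.Pre(node);
//     visited[node->id()] = true;
//     for (Edge edge : node->input_edges()) {
//       if (!visited[edge.to()->id()]) Traverse(edge.to());
//       prepare_uses.PostEdge(edge.from(), edge.index(), edge.to());
//     }
//   }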


// -----------------------------------------------------------------------------
// Phase 4: Schedule nodes early.


class ScheduleEarlyNodeVisitor {
 public:
  ScheduleEarlyNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler), schedule_(scheduler->schedule_), queue_(zone) {}

  // Run the schedule early algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      queue_.push(root);
      while (!queue_.empty()) {
        VisitNode(queue_.front());
        queue_.pop();
      }
    }
  }

 private:
  // Visits one node from the queue and propagates its current schedule early
  // position to all uses. This in turn might push more nodes onto the queue.
  void VisitNode(Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // Fixed nodes already know their schedule early position.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
      data->minimum_block_ = schedule_->block(node);
      TRACE("Fixing #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }

    // No need to propagate unconstrained schedule early positions.
    if (data->minimum_block_ == schedule_->start()) return;

    // Propagate schedule early position.
    DCHECK_NOT_NULL(data->minimum_block_);
    for (auto use : node->uses()) {
      PropagateMinimumPositionToNode(data->minimum_block_, use);
    }
  }

  // Propagates {block} as another minimum position into the given {node}. This
  // has the net effect of computing the minimum dominator block of {node} that
  // still post-dominates all inputs to {node} when the queue is processed.
  void PropagateMinimumPositionToNode(BasicBlock* block, Node* node) {
    Scheduler::SchedulerData* data = scheduler_->GetData(node);

    // No need to propagate to fixed node, it's guaranteed to be a root.
    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) return;

    // Coupled nodes influence schedule early position of their control.
    if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
      Node* control = NodeProperties::GetControlInput(node);
      PropagateMinimumPositionToNode(block, control);
    }

    // Propagate new position if it is deeper down the dominator tree than the
    // current. Note that all inputs need to have minimum block position inside
    // the dominator chain of {node}'s minimum block position.
    DCHECK(InsideSameDominatorChain(block, data->minimum_block_));
    if (block->dominator_depth() > data->minimum_block_->dominator_depth()) {
      data->minimum_block_ = block;
      queue_.push(node);
      TRACE("Propagating #%d:%s minimum_block = id:%d, dominator_depth = %d\n",
            node->id(), node->op()->mnemonic(),
            data->minimum_block_->id().ToInt(),
            data->minimum_block_->dominator_depth());
    }
  }
1283
1284#if DEBUG
1285 bool InsideSameDominatorChain(BasicBlock* b1, BasicBlock* b2) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001286 BasicBlock* dominator = BasicBlock::GetCommonDominator(b1, b2);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001287 return dominator == b1 || dominator == b2;
1288 }
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001289#endif
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001290
1291 Scheduler* scheduler_;
1292 Schedule* schedule_;
1293 ZoneQueue<Node*> queue_;
1294};
1295
1296
1297void Scheduler::ScheduleEarly() {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001298 TRACE("--- SCHEDULE EARLY -----------------------------------------\n");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001299 if (FLAG_trace_turbo_scheduler) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001300 TRACE("roots: ");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001301 for (Node* node : schedule_root_nodes_) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001302 TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001303 }
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001304 TRACE("\n");
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001305 }
1306
1307 // Compute the minimum block for each node thereby determining the earliest
1308 // position each node could be placed within a valid schedule.
1309 ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
1310 schedule_early_visitor.Run(&schedule_root_nodes_);
Ben Murdochb8a8cc12014-11-26 15:28:44 +00001311}
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001312
1313
1314// -----------------------------------------------------------------------------
1315// Phase 5: Schedule nodes late.
1316
1317
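// Computes the "schedule late" position for every schedulable node: each node
// is placed into the common dominator of all its uses, hoisted out of loops
// where possible, and optionally split so that copies end up next to the
// branches that actually use the value.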
class ScheduleLateNodeVisitor {
 public:
  ScheduleLateNodeVisitor(Zone* zone, Scheduler* scheduler)
      : scheduler_(scheduler),
        schedule_(scheduler_->schedule_),
        marked_(scheduler->zone_),
        marking_queue_(scheduler->zone_) {}

  // Run the schedule late algorithm on a set of fixed root nodes.
  void Run(NodeVector* roots) {
    for (Node* const root : *roots) {
      ProcessQueue(root);
    }
  }

 private:
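  // Seeds the scheduling queue with those inputs of {root} that have no
  // unscheduled uses left and drains the queue, computing the schedule late
  // position for every node that becomes eligible.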
  void ProcessQueue(Node* root) {
    ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
    for (Node* node : root->inputs()) {
      // Don't schedule coupled nodes on their own.
      if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
        node = NodeProperties::GetControlInput(node);
      }

      // Test schedulability condition by looking at unscheduled use count.
      if (scheduler_->GetData(node)->unscheduled_count_ != 0) continue;

      queue->push(node);
      do {
        Node* const node = queue->front();
        queue->pop();
        VisitNode(node);
      } while (!queue->empty());
    }
  }

  // Visits one node from the queue of schedulable nodes and determines its
  // schedule late position. Also hoists nodes out of loops to find a better
  // scheduling position.
  void VisitNode(Node* node) {
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);

    // Don't schedule nodes that are already scheduled.
    if (schedule_->IsScheduled(node)) return;
    DCHECK_EQ(Scheduler::kSchedulable, scheduler_->GetPlacement(node));

    // Determine the dominating block for all of the uses of this node. It is
    // the latest block that this node can be scheduled in.
    TRACE("Scheduling #%d:%s\n", node->id(), node->op()->mnemonic());
    BasicBlock* block = GetCommonDominatorOfUses(node);
    DCHECK_NOT_NULL(block);

    // The schedule early block dominates the schedule late block.
    BasicBlock* min_block = scheduler_->GetData(node)->minimum_block_;
    DCHECK_EQ(min_block, BasicBlock::GetCommonDominator(block, min_block));
    TRACE(
        "Schedule late of #%d:%s is id:%d at loop depth %d, minimum = id:%d\n",
        node->id(), node->op()->mnemonic(), block->id().ToInt(),
        block->loop_depth(), min_block->id().ToInt());

    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
    // into enclosing loop pre-headers until they would precede their schedule
    // early position.
    BasicBlock* hoist_block = GetHoistBlock(block);
    if (hoist_block &&
        hoist_block->dominator_depth() >= min_block->dominator_depth()) {
      do {
        TRACE("  hoisting #%d:%s to block id:%d\n", node->id(),
              node->op()->mnemonic(), hoist_block->id().ToInt());
        DCHECK_LT(hoist_block->loop_depth(), block->loop_depth());
        block = hoist_block;
        hoist_block = GetHoistBlock(hoist_block);
      } while (hoist_block &&
               hoist_block->dominator_depth() >= min_block->dominator_depth());
    } else if (scheduler_->flags_ & Scheduler::kSplitNodes) {
      // Split the {node} if beneficial and return the new {block} for it.
      block = SplitNode(block, node);
    }

    // Schedule the node or a floating control structure.
    if (IrOpcode::IsMergeOpcode(node->opcode())) {
      ScheduleFloatingControl(block, node);
    } else if (node->opcode() == IrOpcode::kFinishRegion) {
      ScheduleRegion(block, node);
    } else {
      ScheduleNode(block, node);
    }
  }

  // Mark {block} and push its non-marked predecessors on the marking queue.
  void MarkBlock(BasicBlock* block) {
    DCHECK_LT(block->id().ToSize(), marked_.size());
    marked_[block->id().ToSize()] = true;
    for (BasicBlock* pred_block : block->predecessors()) {
      DCHECK_LT(pred_block->id().ToSize(), marked_.size());
      if (marked_[pred_block->id().ToSize()]) continue;
      marking_queue_.push_back(pred_block);
    }
  }

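  // Tries to place {node} closer to its uses when the common dominator
  // {block} of all uses has several successors and some path from {block} to
  // the end does not use {node} at all. Uses are grouped by the marking
  // closure computed below, and {node} (or a clone of it) is placed into the
  // dominator of each group, so the value is only computed on paths that
  // actually need it. For example, in
  //
  //            block
  //            /   \
  //         use1   branch
  //                /    \
  //             use2   (no use)
  //
  // {node} ends up in the blocks of use1 and use2 (one of them receiving a
  // clone) instead of being computed unconditionally in {block}.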
  BasicBlock* SplitNode(BasicBlock* block, Node* node) {
    // For now, we limit splitting to pure nodes.
    if (!node->op()->HasProperty(Operator::kPure)) return block;
    // TODO(titzer): fix the special case of splitting of projections.
    if (node->opcode() == IrOpcode::kProjection) return block;

    // The {block} is the common dominator of all uses of {node}, so we cannot
    // split anything unless the {block} has at least two successors.
    DCHECK_EQ(block, GetCommonDominatorOfUses(node));
    if (block->SuccessorCount() < 2) return block;

    // Clear marking bits.
    DCHECK(marking_queue_.empty());
    std::fill(marked_.begin(), marked_.end(), false);
    marked_.resize(schedule_->BasicBlockCount() + 1, false);

    // Check if the {node} has uses in {block}.
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr || marked_[use_block->id().ToSize()]) continue;
      if (use_block == block) {
        TRACE("  not splitting #%d:%s, it is used in id:%d\n", node->id(),
              node->op()->mnemonic(), block->id().ToInt());
        marking_queue_.clear();
        return block;
      }
      MarkBlock(use_block);
    }

    // Compute transitive marking closure; a block is marked if all its
    // successors are marked.
    do {
      BasicBlock* top_block = marking_queue_.front();
      marking_queue_.pop_front();
      if (marked_[top_block->id().ToSize()]) continue;
      bool marked = true;
      for (BasicBlock* successor : top_block->successors()) {
        if (!marked_[successor->id().ToSize()]) {
          marked = false;
          break;
        }
      }
      if (marked) MarkBlock(top_block);
    } while (!marking_queue_.empty());

    // If the (common dominator) {block} is marked, we know that all paths from
    // {block} to the end contain at least one use of {node}, and hence there's
    // no point in splitting the {node} in this case.
    if (marked_[block->id().ToSize()]) {
      TRACE("  not splitting #%d:%s, its common dominator id:%d is perfect\n",
            node->id(), node->op()->mnemonic(), block->id().ToInt());
      return block;
    }

    // Split {node} for uses according to the previously computed marking
    // closure. Every marking partition has a unique dominator, which gets a
    // copy of the {node}, with the exception of the first partition, which
    // gets the {node} itself.
    ZoneMap<BasicBlock*, Node*> dominators(scheduler_->zone_);
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      if (use_block == nullptr) continue;
      while (marked_[use_block->dominator()->id().ToSize()]) {
        use_block = use_block->dominator();
      }
      auto& use_node = dominators[use_block];
      if (use_node == nullptr) {
        if (dominators.size() == 1u) {
          // Place the {node} at {use_block}.
          block = use_block;
          use_node = node;
          TRACE("  pushing #%d:%s down to id:%d\n", node->id(),
                node->op()->mnemonic(), block->id().ToInt());
        } else {
          // Place a copy of {node} at {use_block}.
          use_node = CloneNode(node);
          TRACE("  cloning #%d:%s for id:%d\n", use_node->id(),
                use_node->op()->mnemonic(), use_block->id().ToInt());
          scheduler_->schedule_queue_.push(use_node);
        }
      }
      edge.UpdateTo(use_node);
    }
    return block;
  }

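  // Returns the block into which the contents of {block} could be hoisted in
  // order to leave the enclosing loop: the dominator of the loop header (the
  // loop pre-header), or nullptr if hoisting out of the loop is not possible.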
  BasicBlock* GetHoistBlock(BasicBlock* block) {
    if (block->IsLoopHeader()) return block->dominator();
    // We have to check to make sure that the {block} dominates all
    // of the outgoing blocks. If it doesn't, then there is a path
    // out of the loop which does not execute this {block}, so we
    // can't hoist operations from this {block} out of the loop, as
    // that would introduce additional computations.
    if (BasicBlock* header_block = block->loop_header()) {
      for (BasicBlock* outgoing_block :
           scheduler_->special_rpo_->GetOutgoingBlocks(header_block)) {
        if (BasicBlock::GetCommonDominator(block, outgoing_block) != block) {
          return nullptr;
        }
      }
      return header_block->dominator();
    }
    return nullptr;
  }

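  // Computes the latest block in which {node} could be placed: the common
  // dominator of the blocks of all its uses. Uses whose block is not yet
  // known (GetBlockForUse returns nullptr) are ignored.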
  BasicBlock* GetCommonDominatorOfUses(Node* node) {
    BasicBlock* block = nullptr;
    for (Edge edge : node->use_edges()) {
      BasicBlock* use_block = GetBlockForUse(edge);
      block = block == nullptr
                  ? use_block
                  : use_block == nullptr
                        ? block
                        : BasicBlock::GetCommonDominator(block, use_block);
    }
    return block;
  }

  BasicBlock* FindPredecessorBlock(Node* node) {
    return scheduler_->control_flow_builder_->FindPredecessorBlock(node);
  }

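  // Determines the block in which the value flowing over {edge} is needed.
  // Uses by fixed phis and merges count in the predecessor block that matches
  // the corresponding control input; uses by coupled phis defer to the common
  // dominator of the phi's own uses; all other uses count in the block of the
  // using node itself (or not at all while that node is still unscheduled).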
  BasicBlock* GetBlockForUse(Edge edge) {
    Node* use = edge.from();
    if (IrOpcode::IsPhiOpcode(use->opcode())) {
      // If the use is from a coupled (i.e. floating) phi, compute the common
      // dominator of its uses. This will not recurse more than one level.
      if (scheduler_->GetPlacement(use) == Scheduler::kCoupled) {
        TRACE("  inspecting uses of coupled #%d:%s\n", use->id(),
              use->op()->mnemonic());
        DCHECK_EQ(edge.to(), NodeProperties::GetControlInput(use));
        return GetCommonDominatorOfUses(use);
      }
      // If the use is from a fixed (i.e. non-floating) phi, we use the
      // predecessor block of the corresponding control input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed phi #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        Node* merge = NodeProperties::GetControlInput(use, 0);
        DCHECK(IrOpcode::IsMergeOpcode(merge->opcode()));
        Node* input = NodeProperties::GetControlInput(merge, edge.index());
        return FindPredecessorBlock(input);
      }
    } else if (IrOpcode::IsMergeOpcode(use->opcode())) {
      // If the use is from a fixed (i.e. non-floating) merge, we use the
      // predecessor block of the current input to the merge.
      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
        TRACE("  input@%d into a fixed merge #%d:%s\n", edge.index(), use->id(),
              use->op()->mnemonic());
        return FindPredecessorBlock(edge.to());
      }
    }
    BasicBlock* result = schedule_->block(use);
    if (result == nullptr) return nullptr;
    TRACE("  must dominate use #%d:%s in id:%d\n", use->id(),
          use->op()->mnemonic(), result->id().ToInt());
    return result;
  }

  void ScheduleFloatingControl(BasicBlock* block, Node* node) {
    scheduler_->FuseFloatingControl(block, node);
  }

  void ScheduleRegion(BasicBlock* block, Node* region_end) {
    // We only allow regions of instructions connected into a linear
    // effect chain. The only value allowed to be produced by a node
    // in the chain is the value consumed by the FinishRegion node.

    // We schedule back to front; we first schedule FinishRegion.
    CHECK_EQ(IrOpcode::kFinishRegion, region_end->opcode());
    ScheduleNode(block, region_end);

    // Schedule the chain.
    Node* node = NodeProperties::GetEffectInput(region_end);
    while (node->opcode() != IrOpcode::kBeginRegion) {
      DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
      DCHECK_EQ(1, node->op()->EffectInputCount());
      DCHECK_EQ(1, node->op()->EffectOutputCount());
      DCHECK_EQ(0, node->op()->ControlOutputCount());
      // The value output (if there is any) must be consumed
      // by the FinishRegion node.
      DCHECK(node->op()->ValueOutputCount() == 0 ||
             node == region_end->InputAt(0));
      ScheduleNode(block, node);
      node = NodeProperties::GetEffectInput(node);
    }
    // Schedule the BeginRegion node.
    DCHECK_EQ(0, scheduler_->GetData(node)->unscheduled_count_);
    ScheduleNode(block, node);
  }

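  // Plans {node} into {block}, records it in the per-block node list from
  // which SealFinalSchedule later emits the nodes, and marks it as scheduled.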
  void ScheduleNode(BasicBlock* block, Node* node) {
    schedule_->PlanNode(block, node);
    scheduler_->scheduled_nodes_[block->id().ToSize()].push_back(node);
    scheduler_->UpdatePlacement(node, Scheduler::kScheduled);
  }

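  // Creates a copy of {node} for a split-off group of uses. The copy shares
  // the inputs of the original, so their unscheduled use counts are bumped
  // first; the copy also inherits the scheduler data of the original node.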
  Node* CloneNode(Node* node) {
    int const input_count = node->InputCount();
    for (int index = 0; index < input_count; ++index) {
      Node* const input = node->InputAt(index);
      scheduler_->IncrementUnscheduledUseCount(input, index, node);
    }
    Node* const copy = scheduler_->graph_->CloneNode(node);
    TRACE("clone #%d:%s -> #%d\n", node->id(), node->op()->mnemonic(),
          copy->id());
    scheduler_->node_data_.resize(copy->id() + 1,
                                  scheduler_->DefaultSchedulerData());
    scheduler_->node_data_[copy->id()] = scheduler_->node_data_[node->id()];
    return copy;
  }

  Scheduler* scheduler_;
  Schedule* schedule_;
  BoolVector marked_;
  ZoneDeque<BasicBlock*> marking_queue_;
};


void Scheduler::ScheduleLate() {
  TRACE("--- SCHEDULE LATE ------------------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    TRACE("roots: ");
    for (Node* node : schedule_root_nodes_) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }

  // Schedule: Places nodes in the common dominator block of all their uses.
  ScheduleLateNodeVisitor schedule_late_visitor(zone_, this);
  schedule_late_visitor.Run(&schedule_root_nodes_);
}


// -----------------------------------------------------------------------------
// Phase 6: Seal the final schedule.

void Scheduler::SealFinalSchedule() {
  TRACE("--- SEAL FINAL SCHEDULE ------------------------------------\n");

  // Serialize the assembly order and reverse-post-order numbering.
  special_rpo_->SerializeRPOIntoSchedule();
  special_rpo_->PrintAndVerifySpecialRPO();

  // Add collected nodes for basic blocks to their blocks in the right order.
  int block_num = 0;
  for (NodeVector& nodes : scheduled_nodes_) {
    BasicBlock::Id id = BasicBlock::Id::FromInt(block_num++);
    BasicBlock* block = schedule_->GetBlockById(id);
    for (Node* node : base::Reversed(nodes)) {
      schedule_->AddNode(block, node);
    }
  }
}


// -----------------------------------------------------------------------------

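// Fuses a floating control structure ending in {node} into the fixed control
// flow at {block}: the control-flow graph, special RPO and dominator tree are
// updated for the newly attached blocks, early scheduling is re-run for the
// new control nodes and their phis, and nodes already planned into {block}
// are moved into the block of {node}.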
void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
  TRACE("--- FUSE FLOATING CONTROL ----------------------------------\n");
  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule before control flow fusion:\n" << *schedule_;
  }

  // Iterate on phase 1: Build control-flow graph.
  control_flow_builder_->Run(block, node);

  // Iterate on phase 2: Compute special RPO and dominator tree.
  special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
  // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
  for (BasicBlock* b = block->rpo_next(); b != nullptr; b = b->rpo_next()) {
    b->set_dominator_depth(-1);
    b->set_dominator(nullptr);
  }
  PropagateImmediateDominators(block->rpo_next());

  // Iterate on phase 4: Schedule nodes early.
  // TODO(mstarzinger): The following loop gathering the propagation roots is a
  // temporary solution and should be merged into the rest of the scheduler as
  // soon as the approach has settled for all floating loops.
  NodeVector propagation_roots(control_flow_builder_->control_);
  for (Node* node : control_flow_builder_->control_) {
    for (Node* use : node->uses()) {
      if (NodeProperties::IsPhi(use)) propagation_roots.push_back(use);
    }
  }
  if (FLAG_trace_turbo_scheduler) {
    TRACE("propagation roots: ");
    for (Node* node : propagation_roots) {
      TRACE("#%d:%s ", node->id(), node->op()->mnemonic());
    }
    TRACE("\n");
  }
  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
  schedule_early_visitor.Run(&propagation_roots);

  // Move previously planned nodes.
  // TODO(mstarzinger): Improve that by supporting bulk moves.
  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
  MovePlannedNodes(block, schedule_->block(node));

  if (FLAG_trace_turbo_scheduler) {
    OFStream os(stdout);
    os << "Schedule after control flow fusion:\n" << *schedule_;
  }
}


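// Moves all nodes planned into block {from} over to block {to}, preserving
// their relative order; used by FuseFloatingControl above.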
1730void Scheduler::MovePlannedNodes(BasicBlock* from, BasicBlock* to) {
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001731 TRACE("Move planned nodes from id:%d to id:%d\n", from->id().ToInt(),
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001732 to->id().ToInt());
1733 NodeVector* nodes = &(scheduled_nodes_[from->id().ToSize()]);
Ben Murdoch4a90d5f2016-03-22 12:00:34 +00001734 for (Node* const node : *nodes) {
1735 schedule_->SetBlockForNode(to, node);
1736 scheduled_nodes_[to->id().ToSize()].push_back(node);
Emily Bernierd0a1eb72015-03-24 16:35:39 -04001737 }
1738 nodes->clear();
1739}
1740
1741} // namespace compiler
1742} // namespace internal
1743} // namespace v8