blob: aa63e9734f930433ec54ca728874fd836d229642 [file] [log] [blame]
Evan Chengd38c22b2006-05-11 23:55:42 +00001//===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file was developed by Evan Cheng and is distributed under the
6// University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements bottom-up and top-down register pressure reduction list
11// schedulers, using standard algorithms. The basic approach uses a priority
12// queue of available nodes to schedule. One at a time, nodes are taken from
13// the priority queue (thus in priority order), checked for legality to
14// schedule, and emitted if legal.
15//
16//===----------------------------------------------------------------------===//
17
Dale Johannesen2182f062007-07-13 17:13:54 +000018#define DEBUG_TYPE "pre-RA-sched"
Evan Chengd38c22b2006-05-11 23:55:42 +000019#include "llvm/CodeGen/ScheduleDAG.h"
Jim Laskey29e635d2006-08-02 12:30:23 +000020#include "llvm/CodeGen/SchedulerRegistry.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000021#include "llvm/CodeGen/SSARegMap.h"
22#include "llvm/Target/MRegisterInfo.h"
Owen Anderson8c2c1e92006-05-12 06:33:49 +000023#include "llvm/Target/TargetData.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000024#include "llvm/Target/TargetMachine.h"
25#include "llvm/Target/TargetInstrInfo.h"
26#include "llvm/Support/Debug.h"
Chris Lattner3d27be12006-08-27 12:54:02 +000027#include "llvm/Support/Compiler.h"
Evan Cheng5924bf72007-09-25 01:54:36 +000028#include "llvm/ADT/SmallSet.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000029#include "llvm/ADT/Statistic.h"
30#include <climits>
Evan Chengd38c22b2006-05-11 23:55:42 +000031#include <queue>
32#include "llvm/Support/CommandLine.h"
33using namespace llvm;
34
// Command-line registration of the two register-pressure-reduction list
// schedulers.  These make "-pre-RA-sched=list-burr" (bottom-up) and
// "-pre-RA-sched=list-tdrr" (top-down) available; the factory functions are
// declared elsewhere and construct a ScheduleDAGRRList configured for the
// corresponding direction.
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       " Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       " Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
43
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  /// Owned by this scheduler and deleted in the destructor.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their defining
  /// SUnits that are "live".  These nodes must be scheduled before any other
  /// node that modifies those registers can be scheduled.
  SmallSet<unsigned, 4> LiveRegs;
  // LiveRegDefs[Reg] - the SUnit that defines live physical register Reg
  // (NULL when Reg is not live).  Indexed by register number.
  std::vector<SUnit*> LiveRegDefs;
  // LiveRegCycles[Reg] - the cycle at which Reg became live (0 otherwise).
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue) {
    }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

private:
  // Bottom-up / top-down edge release and the backtracking machinery used to
  // recover from physical-register dependency conflicts.
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  SUnit *InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*);
  bool DelayForLiveRegsBottomUp(SUnit*, unsigned&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();
};
}  // end anonymous namespace
98
99
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Size the per-physical-register live tracking tables; nothing is live yet.
  LiveRegDefs.resize(MRI->getNumRegs(), NULL);
  LiveRegCycles.resize(MRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  // Depth/height feed the priority queue's latency-based heuristics.
  CalculateDepths();
  CalculateHeights();

  AvailableQueue->initNodes(SUnitMap, SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  // Opportunistically mark two-address nodes for commuting to reduce
  // register pressure; must run after the final Sequence is known.
  CommuteNodesToReducePressure();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";

  // Emit in scheduled order
  EmitSchedule();
}
134
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000135/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
Evan Chengafed73e2006-05-12 01:58:24 +0000136/// it is not the last use of its first operand, add it to the CommuteSet if
137/// possible. It will be commuted when it is translated to a MI.
138void ScheduleDAGRRList::CommuteNodesToReducePressure() {
Evan Chenge3c44192007-06-22 01:35:51 +0000139 SmallPtrSet<SUnit*, 4> OperandSeen;
Evan Chengafed73e2006-05-12 01:58:24 +0000140 for (unsigned i = Sequence.size()-1; i != 0; --i) { // Ignore first node.
141 SUnit *SU = Sequence[i];
Evan Cheng8e136a92007-09-26 21:36:17 +0000142 if (!SU || !SU->Node) continue;
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000143 if (SU->isCommutable) {
144 unsigned Opc = SU->Node->getTargetOpcode();
Evan Cheng100c8d62007-09-13 00:06:00 +0000145 unsigned NumRes = TII->getNumDefs(Opc);
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000146 unsigned NumOps = CountOperands(SU->Node);
147 for (unsigned j = 0; j != NumOps; ++j) {
Evan Cheng67fc1412006-12-01 21:52:58 +0000148 if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) == -1)
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000149 continue;
150
151 SDNode *OpN = SU->Node->getOperand(j).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +0000152 SUnit *OpSU = SUnitMap[OpN][SU->InstanceNo];
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000153 if (OpSU && OperandSeen.count(OpSU) == 1) {
154 // Ok, so SU is not the last use of OpSU, but SU is two-address so
155 // it will clobber OpSU. Try to commute SU if no other source operands
156 // are live below.
157 bool DoCommute = true;
158 for (unsigned k = 0; k < NumOps; ++k) {
159 if (k != j) {
160 OpN = SU->Node->getOperand(k).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +0000161 OpSU = SUnitMap[OpN][SU->InstanceNo];
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000162 if (OpSU && OperandSeen.count(OpSU) == 1) {
163 DoCommute = false;
164 break;
165 }
166 }
Evan Chengafed73e2006-05-12 01:58:24 +0000167 }
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000168 if (DoCommute)
169 CommuteSet.insert(SU->Node);
Evan Chengafed73e2006-05-12 01:58:24 +0000170 }
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000171
172 // Only look at the first use&def node for now.
173 break;
Evan Chengafed73e2006-05-12 01:58:24 +0000174 }
175 }
176
Chris Lattnerd86418a2006-08-17 00:09:56 +0000177 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
178 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +0000179 if (!I->isCtrl)
180 OperandSeen.insert(I->Dep);
Evan Chengafed73e2006-05-12 01:58:24 +0000181 }
182 }
183}
Evan Chengd38c22b2006-05-11 23:55:42 +0000184
185//===----------------------------------------------------------------------===//
186// Bottom-Up Scheduling
187//===----------------------------------------------------------------------===//
188
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  // Data and chain (control) successors are counted separately.
  if (!isChain)
    --PredSU->NumSuccsLeft;
  else
    --PredSU->NumChainSuccsLeft;

#ifndef NDEBUG
  // Catch double-release bugs: a negative count means this pred was
  // released more times than it has successors.
  if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // All successors scheduled: the predecessor becomes available.
  if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
    // EntryToken has to go last!  Special case it here.
    if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) {
      PredSU->isAvailable = true;
      AvailableQueue->push(PredSU);
    }
  }
}
221
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    // Negative cost marks a physical register dependency edge.
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      // Only clear the liveness if this was the use that made it live
      // (the successor scheduled in that recorded cycle).
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}
264
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  // Recompute the cycle bound from scratch, ignoring the edge to SU which
  // is being unscheduled.
  PredSU->CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    PredSU->CycleBound = std::max(PredSU->CycleBound,
                                  I->Dep->Cycle + PredSU->Latency);
  }

  // The predecessor now has an unscheduled successor again, so it cannot
  // remain available.  (Pending nodes live in NotReady, not in the queue.)
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  if (!isChain)
    ++PredSU->NumSuccsLeft;
  else
    ++PredSU->NumChainSuccsLeft;
}
289
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  // Re-capture each predecessor, and undo the live-register bookkeeping
  // that scheduling SU performed (mirror of ScheduleNodeBottomUp).
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    // If SU's scheduling made I->Reg live at SU->Cycle, kill it again.
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  // SU's physical register defs become live again: its (still scheduled)
  // successors depend on them.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      // Track the earliest cycle at which the register is used.
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
328
Evan Chengcfd5f822007-09-27 00:25:29 +0000329// FIXME: This is probably too slow!
330static void isReachable(SUnit *SU, SUnit *TargetSU,
331 SmallPtrSet<SUnit*, 32> &Visited, bool &Reached) {
332 if (Reached) return;
333 if (SU == TargetSU) {
334 Reached = true;
335 return;
336 }
337 if (!Visited.insert(SU)) return;
338
339 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E;
340 ++I)
341 isReachable(I->Dep, TargetSU, Visited, Reached);
342}
343
344static bool isReachable(SUnit *SU, SUnit *TargetSU) {
345 SmallPtrSet<SUnit*, 32> Visited;
346 bool Reached = false;
347 isReachable(SU, TargetSU, Visited, Reached);
348 return Reached;
349}
350
351/// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
352/// create a cycle.
353static bool willCreateCycle(SUnit *SU, SUnit *TargetSU) {
354 if (isReachable(TargetSU, SU))
355 return true;
356 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
357 I != E; ++I)
358 if (I->Cost < 0 && isReachable(TargetSU, I->Dep))
359 return true;
360 return false;
361}
362
/// BacktrackBottomUp - Backtrack scheduling to the earlier cycle specified in
/// BtCycle in order to schedule a specific node: pop nodes off the tail of
/// Sequence and unschedule them, one cycle at a time, until CurCycle reaches
/// BtCycle.  (NOTE(review): the old comment claimed this "returns the last
/// unscheduled SUnit", but the function is void and only updates CurCycle.)
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }


  // Sanity check: SU must not depend on the last node we unscheduled,
  // otherwise it still can't be scheduled at BtCycle.
  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }
}
385
386/// isSafeToCopy - True if the SUnit for the given SDNode can safely cloned,
387/// i.e. the node does not produce a flag, it does not read a flag and it does
388/// not have an incoming chain.
389static bool isSafeToCopy(SDNode *N) {
Evan Cheng8e136a92007-09-26 21:36:17 +0000390 if (!N)
391 return true;
392
Evan Cheng5924bf72007-09-25 01:54:36 +0000393 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
394 if (N->getValueType(i) == MVT::Flag)
395 return false;
396 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
397 const SDOperand &Op = N->getOperand(i);
398 MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
399 if (VT == MVT::Other || VT == MVT::Flag)
400 return false;
401 }
402
403 return true;
404}
405
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";

  SUnit *NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      NewSU->addPred(I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  // Edge removals are deferred via DelDeps so the successor list is not
  // mutated while being iterated.
  SmallVector<SDep*, 2> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
    if (I->Dep->isScheduled) {
      // Re-point the scheduled successor at the clone...
      I->Dep->addPred(NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(I);
    }
  }
  // ...and drop its edge to the original node.
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i]->Dep;
    bool isCtrl = DelDeps[i]->isCtrl;
    Succ->removePred(SU, isCtrl, false);
  }

  // Tell the queue about the changed node and the brand-new one.
  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  return NewSU;
}
445
/// InsertCopiesAndMoveSuccs - Insert expensive cross register class copies and
/// move all scheduled successors of the given SUnit to the last copy.
/// Returns the copy-to SUnit, whose value stands in for SU's register.
SUnit *ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC) {
  // Copy out of SrcRC into DestRC... (NULL node: a pure copy SUnit.)
  SUnit *CopyFromSU = NewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  // ...and back from DestRC into SrcRC for the successors to consume.
  SUnit *CopyToSU = NewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  // Removals are deferred via DelDeps so the list isn't mutated mid-walk.
  SmallVector<SDep*, 2> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
    if (I->Dep->isScheduled) {
      I->Dep->addPred(CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(I);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i]->Dep;
    bool isCtrl = DelDeps[i]->isCtrl;
    Succ->removePred(SU, isCtrl, false);
  }

  // Chain SU -> CopyFrom -> CopyTo.  Cost -1 marks the physical register
  // edge; cost 1 is an ordinary data edge.
  CopyFromSU->addPred(SU, false, false, Reg, -1);
  CopyToSU->addPred(CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);

  return CopyToSU;
}
489
490/// getPhysicalRegisterVT - Returns the ValueType of the physical register
491/// definition of the specified node.
492/// FIXME: Move to SelectionDAG?
493static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
494 const TargetInstrInfo *TII) {
495 const TargetInstrDescriptor &TID = TII->get(N->getTargetOpcode());
496 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
497 unsigned NumRes = TID.numDefs;
498 for (const unsigned *ImpDef = TID.ImplicitDefs; *ImpDef; ++ImpDef) {
499 if (Reg == *ImpDef)
500 break;
501 ++NumRes;
502 }
503 return N->getValueType(NumRes);
504}
505
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, unsigned &CurCycle){
  if (LiveRegs.empty())
    return false;

  // If this node would clobber any "live" register, then it's not ready.
  // However, if this is the last "available" node, then we may have to
  // backtrack.
  bool MustSched = AvailableQueue->empty();
  // LRegs collects every live register SU would clobber.
  SmallVector<unsigned, 4> LRegs;
  // Check SU's physical register uses (Cost < 0 edges): a conflict exists
  // when the register (or an alias) is live but defined by a different node.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      unsigned Reg = I->Reg;
      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep)
        LRegs.push_back(Reg);
      for (const unsigned *Alias = MRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep)
          LRegs.push_back(*Alias);
    }
  }

  // Check the implicit defs of SU's node and all of its flagged nodes.
  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
    if (!Node || !Node->isTargetOpcode())
      continue;
    const TargetInstrDescriptor &TID = TII->get(Node->getTargetOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU)
        LRegs.push_back(*Reg);
      for (const unsigned *Alias = MRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU)
          LRegs.push_back(*Alias);
    }
  }

  if (MustSched && !LRegs.empty()) {
    // We have made a mistake by scheduling some nodes too early. Now we must
    // schedule the current node which will end up clobbering some live
    // registers that are expensive / impossible to copy. Try unscheduling
    // up to the point where it's safe to schedule the current node.
    // The backtrack target is the earliest cycle at which any of the
    // conflicting registers became live.
    unsigned LiveCycle = CurCycle;
    for (unsigned i = 0, e = LRegs.size(); i != e; ++i) {
      unsigned Reg = LRegs[i];
      unsigned LCycle = LiveRegCycles[Reg];
      LiveCycle = std::min(LiveCycle, LCycle);
    }

    SUnit *OldSU = Sequence[LiveCycle];
    if (!willCreateCycle(SU, OldSU)) {
      // If CycleBound is greater than backtrack cycle, then some of SU
      // successors are going to be unscheduled.
      bool SuccUnsched = SU->CycleBound > LiveCycle;
      BacktrackBottomUp(SU, LiveCycle, CurCycle);
      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (OldSU->isAvailable) {
        OldSU->isAvailable = false;
        AvailableQueue->remove(OldSU);
      }
      SU->addPred(OldSU, true, true);
      // If a successor has been unscheduled, then it's not possible to
      // schedule the current node.
      return SuccUnsched;
    } else {
      // Try duplicating the nodes that produces these "expensive to copy"
      // values to break the dependency.
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      SUnit *NewDef;
      if (isSafeToCopy(LRDef->Node))
        NewDef = CopyAndMoveSuccessors(LRDef);
      else {
        // Issue expensive cross register class copies.
        MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
        const TargetRegisterClass *RC =
          MRI->getPhysicalRegisterRegClass(VT, Reg);
        const TargetRegisterClass *DestRC = MRI->getCrossCopyRegClass(RC);
        if (!DestRC) {
          assert(false && "Don't know how to copy this physical register!");
          abort();
        }
        NewDef = InsertCopiesAndMoveSuccs(LRDef,Reg,DestRC,RC);
      }

      DOUT << "Adding an edge from SU # " << SU->NodeNum
           << " to SU #" << NewDef->NodeNum << "\n";
      // The duplicate becomes the live def for Reg and must be scheduled
      // after SU (edge NewDef -> SU added below).
      LiveRegDefs[Reg] = NewDef;
      NewDef->addPred(SU, true, true);
      SU->isAvailable = false;
      AvailableQueue->push(NewDef);
      return true;
    }
  }
  return !LRegs.empty();
}
610
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
  RootSU->isAvailable = true;
  AvailableQueue->push(RootSU);

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  while (!AvailableQueue->empty()) {
    // Pop until we find a node that is ready this cycle and is not delayed
    // by a live physical register conflict.
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle)
        if (!DelayForLiveRegsBottomUp(CurSU, CurCycle))
          break;

      // Verify node is still ready. It may not be in case the
      // scheduler has backtracked.
      if (CurSU->isAvailable) {
        CurSU->isPending = true;
        NotReady.push_back(CurSU);
      }
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    // No schedulable node this cycle: emit a NULL (noop) slot.
    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Add entry node last
  if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
    SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
    Sequence.push_back(Entry);
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());


#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}
681
682//===----------------------------------------------------------------------===//
683// Top-Down Scheduling
684//===----------------------------------------------------------------------===//
685
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
/// (Top-down mirror of ReleasePred.)
void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);

  // Data and chain (control) predecessors are counted separately.
  if (!isChain)
    --SuccSU->NumPredsLeft;
  else
    --SuccSU->NumChainPredsLeft;

#ifndef NDEBUG
  // Catch double-release bugs.
  if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    SuccSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // All predecessors scheduled: the successor becomes available.
  if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}
715
716
717/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
718/// count of its successors. If a successor pending count is zero, add it to
719/// the Available queue.
Evan Chengd12c97d2006-05-30 18:05:39 +0000720void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
Bill Wendling22e978a2006-12-07 20:04:42 +0000721 DOUT << "*** Scheduling [" << CurCycle << "]: ";
Evan Chengd38c22b2006-05-11 23:55:42 +0000722 DEBUG(SU->dump(&DAG));
723 SU->Cycle = CurCycle;
724
725 AvailableQueue->ScheduledNode(SU);
Evan Chengd38c22b2006-05-11 23:55:42 +0000726
727 // Top down: release successors
Chris Lattnerd86418a2006-08-17 00:09:56 +0000728 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
729 I != E; ++I)
Evan Cheng0effc3a2007-09-19 01:38:40 +0000730 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
Evan Chengd38c22b2006-05-11 23:55:42 +0000731 SU->isScheduled = true;
Evan Chengd38c22b2006-05-11 23:55:42 +0000732}
733
Dan Gohman54a187e2007-08-20 19:28:38 +0000734/// ListScheduleTopDown - The main loop of list scheduling for top-down
735/// schedulers.
Evan Chengd38c22b2006-05-11 23:55:42 +0000736void ScheduleDAGRRList::ListScheduleTopDown() {
737 unsigned CurCycle = 0;
Evan Cheng5924bf72007-09-25 01:54:36 +0000738 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
Evan Chengd38c22b2006-05-11 23:55:42 +0000739
740 // All leaves to Available queue.
741 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
742 // It is available if it has no predecessors.
743 if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
744 AvailableQueue->push(&SUnits[i]);
745 SUnits[i].isAvailable = true;
746 }
747 }
748
749 // Emit the entry node first.
750 ScheduleNodeTopDown(Entry, CurCycle);
Evan Cheng5924bf72007-09-25 01:54:36 +0000751 Sequence.push_back(Entry);
752 ++CurCycle;
Evan Chengd38c22b2006-05-11 23:55:42 +0000753
754 // While Available queue is not empty, grab the node with the highest
Dan Gohman54a187e2007-08-20 19:28:38 +0000755 // priority. If it is not ready put it back. Schedule the node.
Evan Chengd38c22b2006-05-11 23:55:42 +0000756 std::vector<SUnit*> NotReady;
Evan Chengd38c22b2006-05-11 23:55:42 +0000757 while (!AvailableQueue->empty()) {
Evan Cheng5924bf72007-09-25 01:54:36 +0000758 SUnit *CurSU = AvailableQueue->pop();
759 while (CurSU && CurSU->CycleBound > CurCycle) {
760 NotReady.push_back(CurSU);
761 CurSU = AvailableQueue->pop();
Evan Chengd38c22b2006-05-11 23:55:42 +0000762 }
763
764 // Add the nodes that aren't ready back onto the available list.
765 AvailableQueue->push_all(NotReady);
766 NotReady.clear();
767
Evan Cheng5924bf72007-09-25 01:54:36 +0000768 if (!CurSU)
769 Sequence.push_back(0);
770 else {
771 ScheduleNodeTopDown(CurSU, CurCycle);
772 Sequence.push_back(CurSU);
773 }
Evan Chengd12c97d2006-05-30 18:05:39 +0000774 CurCycle++;
Evan Chengd38c22b2006-05-11 23:55:42 +0000775 }
776
777
778#ifndef NDEBUG
779 // Verify that all SUnits were scheduled.
780 bool AnyNotSched = false;
781 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
782 if (!SUnits[i].isScheduled) {
783 if (!AnyNotSched)
Bill Wendling22e978a2006-12-07 20:04:42 +0000784 cerr << "*** List scheduling failed! ***\n";
Evan Chengd38c22b2006-05-11 23:55:42 +0000785 SUnits[i].dump(&DAG);
Bill Wendling22e978a2006-12-07 20:04:42 +0000786 cerr << "has not been scheduled!\n";
Evan Chengd38c22b2006-05-11 23:55:42 +0000787 AnyNotSched = true;
788 }
789 }
790 assert(!AnyNotSched);
791#endif
792}
793
794
795
796//===----------------------------------------------------------------------===//
797// RegReductionPriorityQueue Implementation
798//===----------------------------------------------------------------------===//
799//
800// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
801// to reduce register pressure.
802//
803namespace {
804 template<class SF>
805 class RegReductionPriorityQueue;
806
807 /// Sorting functions for the Available queue.
808 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
809 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
810 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
811 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
812
813 bool operator()(const SUnit* left, const SUnit* right) const;
814 };
815
816 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
817 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
818 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
819 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
820
821 bool operator()(const SUnit* left, const SUnit* right) const;
822 };
823} // end anonymous namespace
824
Evan Cheng961bbd32007-01-08 23:50:38 +0000825static inline bool isCopyFromLiveIn(const SUnit *SU) {
826 SDNode *N = SU->Node;
Evan Cheng8e136a92007-09-26 21:36:17 +0000827 return N && N->getOpcode() == ISD::CopyFromReg &&
Evan Cheng961bbd32007-01-08 23:50:38 +0000828 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
829}
830
Evan Chengd38c22b2006-05-11 23:55:42 +0000831namespace {
832 template<class SF>
Chris Lattner996795b2006-06-28 23:17:24 +0000833 class VISIBILITY_HIDDEN RegReductionPriorityQueue
834 : public SchedulingPriorityQueue {
Evan Chengd38c22b2006-05-11 23:55:42 +0000835 std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
836
837 public:
838 RegReductionPriorityQueue() :
839 Queue(SF(this)) {}
840
Evan Cheng5924bf72007-09-25 01:54:36 +0000841 virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000842 std::vector<SUnit> &sunits) {}
Evan Cheng5924bf72007-09-25 01:54:36 +0000843
844 virtual void addNode(const SUnit *SU) {}
845
846 virtual void updateNode(const SUnit *SU) {}
847
Evan Chengd38c22b2006-05-11 23:55:42 +0000848 virtual void releaseState() {}
849
Evan Cheng6730f032007-01-08 23:55:53 +0000850 virtual unsigned getNodePriority(const SUnit *SU) const {
Evan Chengd38c22b2006-05-11 23:55:42 +0000851 return 0;
852 }
853
Evan Cheng5924bf72007-09-25 01:54:36 +0000854 unsigned size() const { return Queue.size(); }
855
Evan Chengd38c22b2006-05-11 23:55:42 +0000856 bool empty() const { return Queue.empty(); }
857
858 void push(SUnit *U) {
859 Queue.push(U);
860 }
861 void push_all(const std::vector<SUnit *> &Nodes) {
862 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
863 Queue.push(Nodes[i]);
864 }
865
866 SUnit *pop() {
Evan Chengd12c97d2006-05-30 18:05:39 +0000867 if (empty()) return NULL;
Evan Chengd38c22b2006-05-11 23:55:42 +0000868 SUnit *V = Queue.top();
869 Queue.pop();
870 return V;
871 }
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000872
Evan Cheng5924bf72007-09-25 01:54:36 +0000873 /// remove - This is a really inefficient way to remove a node from a
874 /// priority queue. We should roll our own heap to make this better or
875 /// something.
876 void remove(SUnit *SU) {
877 std::vector<SUnit*> Temp;
878
879 assert(!Queue.empty() && "Not in queue!");
880 while (Queue.top() != SU) {
881 Temp.push_back(Queue.top());
882 Queue.pop();
883 assert(!Queue.empty() && "Not in queue!");
884 }
885
886 // Remove the node from the PQ.
887 Queue.pop();
888
889 // Add all the other nodes back.
890 for (unsigned i = 0, e = Temp.size(); i != e; ++i)
891 Queue.push(Temp[i]);
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000892 }
Evan Chengd38c22b2006-05-11 23:55:42 +0000893 };
894
895 template<class SF>
Chris Lattner996795b2006-06-28 23:17:24 +0000896 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
897 : public RegReductionPriorityQueue<SF> {
Evan Cheng5924bf72007-09-25 01:54:36 +0000898 // SUnitMap SDNode to SUnit mapping (n -> n).
899 DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000900
Evan Chengd38c22b2006-05-11 23:55:42 +0000901 // SUnits - The SUnits for the current graph.
902 const std::vector<SUnit> *SUnits;
903
904 // SethiUllmanNumbers - The SethiUllman number for each node.
Evan Cheng961bbd32007-01-08 23:50:38 +0000905 std::vector<unsigned> SethiUllmanNumbers;
Evan Chengd38c22b2006-05-11 23:55:42 +0000906
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000907 const TargetInstrInfo *TII;
Evan Chengd38c22b2006-05-11 23:55:42 +0000908 public:
Dan Gohman54a187e2007-08-20 19:28:38 +0000909 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii)
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000910 : TII(tii) {}
Evan Chengd38c22b2006-05-11 23:55:42 +0000911
Evan Cheng5924bf72007-09-25 01:54:36 +0000912 void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000913 std::vector<SUnit> &sunits) {
914 SUnitMap = &sumap;
Evan Chengd38c22b2006-05-11 23:55:42 +0000915 SUnits = &sunits;
916 // Add pseudo dependency edges for two-address nodes.
Evan Chengafed73e2006-05-12 01:58:24 +0000917 AddPseudoTwoAddrDeps();
Evan Chengd38c22b2006-05-11 23:55:42 +0000918 // Calculate node priorities.
Evan Cheng6730f032007-01-08 23:55:53 +0000919 CalculateSethiUllmanNumbers();
Evan Chengd38c22b2006-05-11 23:55:42 +0000920 }
921
Evan Cheng5924bf72007-09-25 01:54:36 +0000922 void addNode(const SUnit *SU) {
923 SethiUllmanNumbers.resize(SUnits->size(), 0);
924 CalcNodeSethiUllmanNumber(SU);
925 }
926
927 void updateNode(const SUnit *SU) {
928 SethiUllmanNumbers[SU->NodeNum] = 0;
929 CalcNodeSethiUllmanNumber(SU);
930 }
931
Evan Chengd38c22b2006-05-11 23:55:42 +0000932 void releaseState() {
933 SUnits = 0;
934 SethiUllmanNumbers.clear();
935 }
936
Evan Cheng6730f032007-01-08 23:55:53 +0000937 unsigned getNodePriority(const SUnit *SU) const {
Evan Cheng961bbd32007-01-08 23:50:38 +0000938 assert(SU->NodeNum < SethiUllmanNumbers.size());
Evan Cheng8e136a92007-09-26 21:36:17 +0000939 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
Evan Cheng961bbd32007-01-08 23:50:38 +0000940 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
941 // CopyFromReg should be close to its def because it restricts
942 // allocation choices. But if it is a livein then perhaps we want it
943 // closer to its uses so it can be coalesced.
944 return 0xffff;
945 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
946 // CopyToReg should be close to its uses to facilitate coalescing and
947 // avoid spilling.
948 return 0;
949 else if (SU->NumSuccs == 0)
950 // If SU does not have a use, i.e. it doesn't produce a value that would
951 // be consumed (e.g. store), then it terminates a chain of computation.
952 // Give it a large SethiUllman number so it will be scheduled right
953 // before its predecessors that it doesn't lengthen their live ranges.
954 return 0xffff;
955 else if (SU->NumPreds == 0)
956 // If SU does not have a def, schedule it close to its uses because it
957 // does not lengthen any live ranges.
958 return 0;
959 else
960 return SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +0000961 }
962
963 private:
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000964 bool canClobber(SUnit *SU, SUnit *Op);
Evan Chengd38c22b2006-05-11 23:55:42 +0000965 void AddPseudoTwoAddrDeps();
Evan Cheng6730f032007-01-08 23:55:53 +0000966 void CalculateSethiUllmanNumbers();
967 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
Evan Chengd38c22b2006-05-11 23:55:42 +0000968 };
969
970
971 template<class SF>
Dan Gohman54a187e2007-08-20 19:28:38 +0000972 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
973 : public RegReductionPriorityQueue<SF> {
Evan Cheng5924bf72007-09-25 01:54:36 +0000974 // SUnitMap SDNode to SUnit mapping (n -> n).
975 DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000976
Evan Chengd38c22b2006-05-11 23:55:42 +0000977 // SUnits - The SUnits for the current graph.
978 const std::vector<SUnit> *SUnits;
979
980 // SethiUllmanNumbers - The SethiUllman number for each node.
Evan Cheng961bbd32007-01-08 23:50:38 +0000981 std::vector<unsigned> SethiUllmanNumbers;
Evan Chengd38c22b2006-05-11 23:55:42 +0000982
983 public:
984 TDRegReductionPriorityQueue() {}
985
Evan Cheng5924bf72007-09-25 01:54:36 +0000986 void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000987 std::vector<SUnit> &sunits) {
988 SUnitMap = &sumap;
Evan Chengd38c22b2006-05-11 23:55:42 +0000989 SUnits = &sunits;
990 // Calculate node priorities.
Evan Cheng6730f032007-01-08 23:55:53 +0000991 CalculateSethiUllmanNumbers();
Evan Chengd38c22b2006-05-11 23:55:42 +0000992 }
993
Evan Cheng5924bf72007-09-25 01:54:36 +0000994 void addNode(const SUnit *SU) {
995 SethiUllmanNumbers.resize(SUnits->size(), 0);
996 CalcNodeSethiUllmanNumber(SU);
997 }
998
999 void updateNode(const SUnit *SU) {
1000 SethiUllmanNumbers[SU->NodeNum] = 0;
1001 CalcNodeSethiUllmanNumber(SU);
1002 }
1003
Evan Chengd38c22b2006-05-11 23:55:42 +00001004 void releaseState() {
1005 SUnits = 0;
1006 SethiUllmanNumbers.clear();
1007 }
1008
Evan Cheng6730f032007-01-08 23:55:53 +00001009 unsigned getNodePriority(const SUnit *SU) const {
Evan Cheng961bbd32007-01-08 23:50:38 +00001010 assert(SU->NodeNum < SethiUllmanNumbers.size());
1011 return SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +00001012 }
1013
1014 private:
Evan Cheng6730f032007-01-08 23:55:53 +00001015 void CalculateSethiUllmanNumbers();
1016 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
Evan Chengd38c22b2006-05-11 23:55:42 +00001017 };
1018}
1019
Evan Chengb9e3db62007-03-14 22:43:40 +00001020/// closestSucc - Returns the scheduled cycle of the successor which is
1021/// closet to the current cycle.
Evan Cheng28748552007-03-13 23:25:11 +00001022static unsigned closestSucc(const SUnit *SU) {
1023 unsigned MaxCycle = 0;
1024 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
Evan Chengb9e3db62007-03-14 22:43:40 +00001025 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001026 unsigned Cycle = I->Dep->Cycle;
Evan Chengb9e3db62007-03-14 22:43:40 +00001027 // If there are bunch of CopyToRegs stacked up, they should be considered
1028 // to be at the same position.
Evan Cheng8e136a92007-09-26 21:36:17 +00001029 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
Evan Cheng0effc3a2007-09-19 01:38:40 +00001030 Cycle = closestSucc(I->Dep)+1;
Evan Chengb9e3db62007-03-14 22:43:40 +00001031 if (Cycle > MaxCycle)
1032 MaxCycle = Cycle;
1033 }
Evan Cheng28748552007-03-13 23:25:11 +00001034 return MaxCycle;
1035}
1036
Evan Chengb9e3db62007-03-14 22:43:40 +00001037/// calcMaxScratches - Returns an cost estimate of the worse case requirement
1038/// for scratch registers. Live-in operands and live-out results don't count
1039/// since they are "fixed".
1040static unsigned calcMaxScratches(const SUnit *SU) {
1041 unsigned Scratches = 0;
1042 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1043 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001044 if (I->isCtrl) continue; // ignore chain preds
Evan Cheng8e136a92007-09-26 21:36:17 +00001045 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
Evan Chengb9e3db62007-03-14 22:43:40 +00001046 Scratches++;
1047 }
1048 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1049 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001050 if (I->isCtrl) continue; // ignore chain succs
Evan Cheng8e136a92007-09-26 21:36:17 +00001051 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
Evan Chengb9e3db62007-03-14 22:43:40 +00001052 Scratches += 10;
1053 }
1054 return Scratches;
1055}
1056
Evan Chengd38c22b2006-05-11 23:55:42 +00001057// Bottom up
1058bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
David Greene4c1e6f32007-06-29 03:42:23 +00001059 // There used to be a special tie breaker here that looked for
David Greene5b6f7552007-06-29 02:48:09 +00001060 // two-address instructions and preferred the instruction with a
1061 // def&use operand. The special case triggered diagnostics when
1062 // _GLIBCXX_DEBUG was enabled because it broke the strict weak
1063 // ordering that priority_queue requires. It didn't help much anyway
1064 // because AddPseudoTwoAddrDeps already covers many of the cases
1065 // where it would have applied. In addition, it's counter-intuitive
1066 // that a tie breaker would be the first thing attempted. There's a
1067 // "real" tie breaker below that is the operation of last resort.
1068 // The fact that the "special tie breaker" would trigger when there
1069 // wasn't otherwise a tie is what broke the strict weak ordering
1070 // constraint.
Evan Cheng99f2f792006-05-13 08:22:24 +00001071
Evan Cheng6730f032007-01-08 23:55:53 +00001072 unsigned LPriority = SPQ->getNodePriority(left);
1073 unsigned RPriority = SPQ->getNodePriority(right);
Evan Cheng961bbd32007-01-08 23:50:38 +00001074 if (LPriority > RPriority)
Evan Chengd38c22b2006-05-11 23:55:42 +00001075 return true;
Evan Cheng28748552007-03-13 23:25:11 +00001076 else if (LPriority == RPriority) {
Dan Gohmane131e3a2007-04-26 19:40:56 +00001077 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
Evan Cheng28748552007-03-13 23:25:11 +00001078 // e.g.
1079 // t1 = op t2, c1
1080 // t3 = op t4, c2
1081 //
1082 // and the following instructions are both ready.
1083 // t2 = op c3
1084 // t4 = op c4
1085 //
1086 // Then schedule t2 = op first.
1087 // i.e.
1088 // t4 = op c4
1089 // t2 = op c3
1090 // t1 = op t2, c1
1091 // t3 = op t4, c2
1092 //
1093 // This creates more short live intervals.
1094 unsigned LDist = closestSucc(left);
1095 unsigned RDist = closestSucc(right);
1096 if (LDist < RDist)
Evan Chengd38c22b2006-05-11 23:55:42 +00001097 return true;
Evan Chengb9e3db62007-03-14 22:43:40 +00001098 else if (LDist == RDist) {
1099 // Intuitively, it's good to push down instructions whose results are
1100 // liveout so their long live ranges won't conflict with other values
1101 // which are needed inside the BB. Further prioritize liveout instructions
1102 // by the number of operands which are calculated within the BB.
1103 unsigned LScratch = calcMaxScratches(left);
1104 unsigned RScratch = calcMaxScratches(right);
1105 if (LScratch > RScratch)
Evan Chengd38c22b2006-05-11 23:55:42 +00001106 return true;
Evan Chengb9e3db62007-03-14 22:43:40 +00001107 else if (LScratch == RScratch)
1108 if (left->Height > right->Height)
Evan Cheng99f2f792006-05-13 08:22:24 +00001109 return true;
Evan Chengb9e3db62007-03-14 22:43:40 +00001110 else if (left->Height == right->Height)
1111 if (left->Depth < right->Depth)
Evan Cheng28748552007-03-13 23:25:11 +00001112 return true;
Evan Chengb9e3db62007-03-14 22:43:40 +00001113 else if (left->Depth == right->Depth)
1114 if (left->CycleBound > right->CycleBound)
1115 return true;
1116 }
Evan Cheng28748552007-03-13 23:25:11 +00001117 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001118 return false;
1119}
1120
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001121template<class SF>
1122bool BURegReductionPriorityQueue<SF>::canClobber(SUnit *SU, SUnit *Op) {
1123 if (SU->isTwoAddress) {
1124 unsigned Opc = SU->Node->getTargetOpcode();
Evan Cheng100c8d62007-09-13 00:06:00 +00001125 unsigned NumRes = TII->getNumDefs(Opc);
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001126 unsigned NumOps = ScheduleDAG::CountOperands(SU->Node);
1127 for (unsigned i = 0; i != NumOps; ++i) {
Evan Cheng67fc1412006-12-01 21:52:58 +00001128 if (TII->getOperandConstraint(Opc, i+NumRes, TOI::TIED_TO) != -1) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001129 SDNode *DU = SU->Node->getOperand(i).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +00001130 if (Op == (*SUnitMap)[DU][SU->InstanceNo])
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001131 return true;
1132 }
1133 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001134 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001135 return false;
1136}
1137
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001138
Evan Chengd38c22b2006-05-11 23:55:42 +00001139/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1140/// it as a def&use operand. Add a pseudo control edge from it to the other
1141/// node (if it won't create a cycle) so the two-address one will be scheduled
1142/// first (lower in the schedule).
1143template<class SF>
1144void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001145 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1146 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1147 if (!SU->isTwoAddress)
1148 continue;
1149
1150 SDNode *Node = SU->Node;
Evan Cheng8e136a92007-09-26 21:36:17 +00001151 if (!Node || !Node->isTargetOpcode())
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001152 continue;
1153
1154 unsigned Opc = Node->getTargetOpcode();
Evan Cheng100c8d62007-09-13 00:06:00 +00001155 unsigned NumRes = TII->getNumDefs(Opc);
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001156 unsigned NumOps = ScheduleDAG::CountOperands(Node);
1157 for (unsigned j = 0; j != NumOps; ++j) {
Evan Cheng67fc1412006-12-01 21:52:58 +00001158 if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) != -1) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001159 SDNode *DU = SU->Node->getOperand(j).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +00001160 SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
Evan Chengf24d15f2006-11-06 21:33:46 +00001161 if (!DUSU) continue;
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001162 for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
1163 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001164 if (I->isCtrl) continue;
1165 SUnit *SuccSU = I->Dep;
Evan Cheng5924bf72007-09-25 01:54:36 +00001166 // Don't constraint nodes with implicit defs. It can create cycles
1167 // plus it may increase register pressures.
1168 if (SuccSU == SU || SuccSU->hasImplicitDefs)
1169 continue;
1170 // Be conservative. Ignore if nodes aren't at the same depth.
1171 if (SuccSU->Depth != SU->Depth)
1172 continue;
1173 if ((!canClobber(SuccSU, DUSU) ||
1174 (!SU->isCommutable && SuccSU->isCommutable)) &&
1175 !isReachable(SuccSU, SU)) {
1176 DOUT << "Adding an edge from SU # " << SU->NodeNum
1177 << " to SU #" << SuccSU->NodeNum << "\n";
1178 SU->addPred(SuccSU, true, true);
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001179 }
1180 }
1181 }
1182 }
1183 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001184}
1185
Evan Cheng6730f032007-01-08 23:55:53 +00001186/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
Evan Chengd38c22b2006-05-11 23:55:42 +00001187/// Smaller number is the higher priority.
1188template<class SF>
Chris Lattner296a83c2007-02-01 04:55:59 +00001189unsigned BURegReductionPriorityQueue<SF>::
1190CalcNodeSethiUllmanNumber(const SUnit *SU) {
Evan Cheng961bbd32007-01-08 23:50:38 +00001191 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +00001192 if (SethiUllmanNumber != 0)
1193 return SethiUllmanNumber;
1194
Evan Cheng961bbd32007-01-08 23:50:38 +00001195 unsigned Extra = 0;
1196 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1197 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001198 if (I->isCtrl) continue; // ignore chain preds
1199 SUnit *PredSU = I->Dep;
Evan Cheng6730f032007-01-08 23:55:53 +00001200 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
Evan Cheng961bbd32007-01-08 23:50:38 +00001201 if (PredSethiUllman > SethiUllmanNumber) {
1202 SethiUllmanNumber = PredSethiUllman;
1203 Extra = 0;
Evan Cheng0effc3a2007-09-19 01:38:40 +00001204 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
Evan Cheng5924bf72007-09-25 01:54:36 +00001205 ++Extra;
Evan Chengd38c22b2006-05-11 23:55:42 +00001206 }
Evan Cheng961bbd32007-01-08 23:50:38 +00001207
1208 SethiUllmanNumber += Extra;
1209
1210 if (SethiUllmanNumber == 0)
1211 SethiUllmanNumber = 1;
Evan Chengd38c22b2006-05-11 23:55:42 +00001212
1213 return SethiUllmanNumber;
1214}
1215
Evan Cheng6730f032007-01-08 23:55:53 +00001216/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1217/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001218template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001219void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001220 SethiUllmanNumbers.assign(SUnits->size(), 0);
1221
1222 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001223 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001224}
1225
1226static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
1227 unsigned Sum = 0;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001228 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1229 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001230 SUnit *SuccSU = I->Dep;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001231 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1232 EE = SuccSU->Preds.end(); II != EE; ++II) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001233 SUnit *PredSU = II->Dep;
Evan Chengd38c22b2006-05-11 23:55:42 +00001234 if (!PredSU->isScheduled)
Evan Cheng5924bf72007-09-25 01:54:36 +00001235 ++Sum;
Evan Chengd38c22b2006-05-11 23:55:42 +00001236 }
1237 }
1238
1239 return Sum;
1240}
1241
1242
1243// Top down
1244bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
Evan Cheng6730f032007-01-08 23:55:53 +00001245 unsigned LPriority = SPQ->getNodePriority(left);
1246 unsigned RPriority = SPQ->getNodePriority(right);
Evan Cheng8e136a92007-09-26 21:36:17 +00001247 bool LIsTarget = left->Node && left->Node->isTargetOpcode();
1248 bool RIsTarget = right->Node && right->Node->isTargetOpcode();
Evan Chengd38c22b2006-05-11 23:55:42 +00001249 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1250 bool RIsFloater = RIsTarget && right->NumPreds == 0;
1251 unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
1252 unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;
1253
1254 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1255 return false;
1256 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1257 return true;
1258
1259 // Special tie breaker: if two nodes share a operand, the one that use it
1260 // as a def&use operand is preferred.
1261 if (LIsTarget && RIsTarget) {
1262 if (left->isTwoAddress && !right->isTwoAddress) {
1263 SDNode *DUNode = left->Node->getOperand(0).Val;
1264 if (DUNode->isOperand(right->Node))
1265 RBonus += 2;
1266 }
1267 if (!left->isTwoAddress && right->isTwoAddress) {
1268 SDNode *DUNode = right->Node->getOperand(0).Val;
1269 if (DUNode->isOperand(left->Node))
1270 LBonus += 2;
1271 }
1272 }
1273 if (LIsFloater)
1274 LBonus -= 2;
1275 if (RIsFloater)
1276 RBonus -= 2;
1277 if (left->NumSuccs == 1)
1278 LBonus += 2;
1279 if (right->NumSuccs == 1)
1280 RBonus += 2;
1281
1282 if (LPriority+LBonus < RPriority+RBonus)
1283 return true;
1284 else if (LPriority == RPriority)
1285 if (left->Depth < right->Depth)
1286 return true;
1287 else if (left->Depth == right->Depth)
1288 if (left->NumSuccsLeft > right->NumSuccsLeft)
1289 return true;
1290 else if (left->NumSuccsLeft == right->NumSuccsLeft)
1291 if (left->CycleBound > right->CycleBound)
1292 return true;
1293 return false;
1294}
1295
Evan Cheng6730f032007-01-08 23:55:53 +00001296/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
Evan Chengd38c22b2006-05-11 23:55:42 +00001297/// Smaller number is the higher priority.
1298template<class SF>
Chris Lattner296a83c2007-02-01 04:55:59 +00001299unsigned TDRegReductionPriorityQueue<SF>::
1300CalcNodeSethiUllmanNumber(const SUnit *SU) {
Evan Cheng961bbd32007-01-08 23:50:38 +00001301 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +00001302 if (SethiUllmanNumber != 0)
1303 return SethiUllmanNumber;
1304
Evan Cheng8e136a92007-09-26 21:36:17 +00001305 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00001306 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
Evan Cheng961bbd32007-01-08 23:50:38 +00001307 SethiUllmanNumber = 0xffff;
Evan Chengd38c22b2006-05-11 23:55:42 +00001308 else if (SU->NumSuccsLeft == 0)
1309 // If SU does not have a use, i.e. it doesn't produce a value that would
1310 // be consumed (e.g. store), then it terminates a chain of computation.
Chris Lattner296a83c2007-02-01 04:55:59 +00001311 // Give it a small SethiUllman number so it will be scheduled right before
1312 // its predecessors that it doesn't lengthen their live ranges.
Evan Cheng961bbd32007-01-08 23:50:38 +00001313 SethiUllmanNumber = 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00001314 else if (SU->NumPredsLeft == 0 &&
1315 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
Evan Cheng961bbd32007-01-08 23:50:38 +00001316 SethiUllmanNumber = 0xffff;
Evan Chengd38c22b2006-05-11 23:55:42 +00001317 else {
1318 int Extra = 0;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001319 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1320 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001321 if (I->isCtrl) continue; // ignore chain preds
1322 SUnit *PredSU = I->Dep;
Evan Cheng6730f032007-01-08 23:55:53 +00001323 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
Evan Chengd38c22b2006-05-11 23:55:42 +00001324 if (PredSethiUllman > SethiUllmanNumber) {
1325 SethiUllmanNumber = PredSethiUllman;
1326 Extra = 0;
Evan Cheng0effc3a2007-09-19 01:38:40 +00001327 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
Evan Cheng5924bf72007-09-25 01:54:36 +00001328 ++Extra;
Evan Chengd38c22b2006-05-11 23:55:42 +00001329 }
1330
1331 SethiUllmanNumber += Extra;
1332 }
1333
1334 return SethiUllmanNumber;
1335}
1336
Evan Cheng6730f032007-01-08 23:55:53 +00001337/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1338/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001339template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001340void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001341 SethiUllmanNumbers.assign(SUnits->size(), 0);
1342
1343 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001344 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001345}
1346
1347//===----------------------------------------------------------------------===//
1348// Public Constructor Functions
1349//===----------------------------------------------------------------------===//
1350
Jim Laskey03593f72006-08-01 18:29:48 +00001351llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1352 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001353 MachineBasicBlock *BB) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001354 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
Jim Laskey95eda5b2006-08-01 14:21:23 +00001355 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001356 new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII));
Evan Chengd38c22b2006-05-11 23:55:42 +00001357}
1358
Jim Laskey03593f72006-08-01 18:29:48 +00001359llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1360 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001361 MachineBasicBlock *BB) {
Jim Laskey95eda5b2006-08-01 14:21:23 +00001362 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
Chris Lattner296a83c2007-02-01 04:55:59 +00001363 new TDRegReductionPriorityQueue<td_ls_rr_sort>());
Evan Chengd38c22b2006-05-11 23:55:42 +00001364}
1365