blob: 0b218abd4ab3d08fb93133799b032db86d7afe7a [file] [log] [blame]
Evan Chengd38c22b2006-05-11 23:55:42 +00001//===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file was developed by Evan Cheng and is distributed under the
6// University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This implements bottom-up and top-down register pressure reduction list
11// schedulers, using standard algorithms. The basic approach uses a priority
12// queue of available nodes to schedule. One at a time, nodes are taken from
13// the priority queue (thus in priority order), checked for legality to
14// schedule, and emitted if legal.
15//
16//===----------------------------------------------------------------------===//
17
Dale Johannesen2182f062007-07-13 17:13:54 +000018#define DEBUG_TYPE "pre-RA-sched"
Evan Chengd38c22b2006-05-11 23:55:42 +000019#include "llvm/CodeGen/ScheduleDAG.h"
Jim Laskey29e635d2006-08-02 12:30:23 +000020#include "llvm/CodeGen/SchedulerRegistry.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000021#include "llvm/CodeGen/SSARegMap.h"
22#include "llvm/Target/MRegisterInfo.h"
Owen Anderson8c2c1e92006-05-12 06:33:49 +000023#include "llvm/Target/TargetData.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000024#include "llvm/Target/TargetMachine.h"
25#include "llvm/Target/TargetInstrInfo.h"
26#include "llvm/Support/Debug.h"
Chris Lattner3d27be12006-08-27 12:54:02 +000027#include "llvm/Support/Compiler.h"
Evan Chenge6f92252007-09-27 18:46:06 +000028#include "llvm/ADT/SmallPtrSet.h"
Evan Cheng5924bf72007-09-25 01:54:36 +000029#include "llvm/ADT/SmallSet.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000030#include "llvm/ADT/Statistic.h"
31#include <climits>
Evan Chengd38c22b2006-05-11 23:55:42 +000032#include <queue>
33#include "llvm/Support/CommandLine.h"
34using namespace llvm;
35
// Counters reported with -stats; incremented by the bottom-up scheduler below.
STATISTIC(NumBacktracks, "Number of times scheduler backtraced");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumCCCopies, "Number of cross class copies");

// Register the two register-reduction list scheduler variants with the
// scheduler registry so they can be selected via -pre-RA-sched=<name>.
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       " Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       " Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
48
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  /// Owned by this scheduler and deleted in the destructor.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their definition
  /// that are "live". These nodes must be scheduled before any other nodes that
  /// modifies the registers can be scheduled.
  /// LiveRegCycles records the cycle at which each live def was scheduled
  /// (indexed by physical register number, like LiveRegDefs).
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                  const TargetMachine &tm, bool isbottomup,
                  SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue) {
    }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

private:
  // Edge bookkeeping for bottom-up (pred) and top-down (succ) scheduling.
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  // Helpers used to break live physical register dependencies.
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*,
                                  SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();
};
}  // end anonymous namespace
104
105
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // One slot per physical register: the live defining SUnit and the cycle at
  // which that def was scheduled.
  LiveRegDefs.resize(MRI->getNumRegs(), NULL);  
  LiveRegCycles.resize(MRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  CalculateDepths();
  CalculateHeights();

  AvailableQueue->initNodes(SUnitMap, SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();
  
  AvailableQueue->releaseState();
  
  // Post-pass: mark two-address commutable nodes for commuting if that may
  // reduce register pressure.
  CommuteNodesToReducePressure();
  
  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";
  
  // Emit in scheduled order
  EmitSchedule();
}
140
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000141/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
Evan Chengafed73e2006-05-12 01:58:24 +0000142/// it is not the last use of its first operand, add it to the CommuteSet if
143/// possible. It will be commuted when it is translated to a MI.
144void ScheduleDAGRRList::CommuteNodesToReducePressure() {
Evan Chenge3c44192007-06-22 01:35:51 +0000145 SmallPtrSet<SUnit*, 4> OperandSeen;
Evan Chengafed73e2006-05-12 01:58:24 +0000146 for (unsigned i = Sequence.size()-1; i != 0; --i) { // Ignore first node.
147 SUnit *SU = Sequence[i];
Evan Cheng8e136a92007-09-26 21:36:17 +0000148 if (!SU || !SU->Node) continue;
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000149 if (SU->isCommutable) {
150 unsigned Opc = SU->Node->getTargetOpcode();
Evan Cheng100c8d62007-09-13 00:06:00 +0000151 unsigned NumRes = TII->getNumDefs(Opc);
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000152 unsigned NumOps = CountOperands(SU->Node);
153 for (unsigned j = 0; j != NumOps; ++j) {
Evan Cheng67fc1412006-12-01 21:52:58 +0000154 if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) == -1)
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000155 continue;
156
157 SDNode *OpN = SU->Node->getOperand(j).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +0000158 SUnit *OpSU = SUnitMap[OpN][SU->InstanceNo];
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000159 if (OpSU && OperandSeen.count(OpSU) == 1) {
160 // Ok, so SU is not the last use of OpSU, but SU is two-address so
161 // it will clobber OpSU. Try to commute SU if no other source operands
162 // are live below.
163 bool DoCommute = true;
164 for (unsigned k = 0; k < NumOps; ++k) {
165 if (k != j) {
166 OpN = SU->Node->getOperand(k).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +0000167 OpSU = SUnitMap[OpN][SU->InstanceNo];
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000168 if (OpSU && OperandSeen.count(OpSU) == 1) {
169 DoCommute = false;
170 break;
171 }
172 }
Evan Chengafed73e2006-05-12 01:58:24 +0000173 }
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000174 if (DoCommute)
175 CommuteSet.insert(SU->Node);
Evan Chengafed73e2006-05-12 01:58:24 +0000176 }
Evan Chengfd2c5dd2006-11-04 09:44:31 +0000177
178 // Only look at the first use&def node for now.
179 break;
Evan Chengafed73e2006-05-12 01:58:24 +0000180 }
181 }
182
Chris Lattnerd86418a2006-08-17 00:09:56 +0000183 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
184 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +0000185 if (!I->isCtrl)
186 OperandSeen.insert(I->Dep);
Evan Chengafed73e2006-05-12 01:58:24 +0000187 }
188 }
189}
Evan Chengd38c22b2006-05-11 23:55:42 +0000190
191//===----------------------------------------------------------------------===//
192// Bottom-Up Scheduling
193//===----------------------------------------------------------------------===//
194
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain, 
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  // Data and chain (control) successors are counted separately.
  if (!isChain)
    --PredSU->NumSuccsLeft;
  else
    --PredSU->NumChainSuccsLeft;
  
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif
  
  // Once both counts hit zero all successors are scheduled and the
  // predecessor becomes available.
  if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
    // EntryToken has to go last!  Special case it here.
    if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) {
      PredSU->isAvailable = true;
      AvailableQueue->push(PredSU);
    }
  }
}
227
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    // A negative edge cost marks a physical register dependency (see the
    // addPred calls elsewhere in this file).
    if (I->Cost < 0)  {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can 
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      // Only clear the reg if its last consumer (I->Dep) was the one that
      // made it live at this cycle.
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}
270
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  // Recompute the cycle bound from the remaining scheduled successors,
  // ignoring SU itself (it is the one being unscheduled).
  PredSU->CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    PredSU->CycleBound = std::max(PredSU->CycleBound,
                                  I->Dep->Cycle + PredSU->Latency);
  }

  // The predecessor now has an unscheduled successor again, so it cannot be
  // available. Nodes marked isPending are not in the queue (see
  // ListScheduleBottomUp) and must not be removed from it.
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  if (!isChain)
    ++PredSU->NumSuccsLeft;
  else
    ++PredSU->NumChainSuccsLeft;
}
295
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    // Undo the live-register tracking done in ScheduleNodeBottomUp: if SU is
    // the consumer that made this physical reg live, it is no longer live.
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg])  {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // SU's physical register defs become live again since their consumers
    // remain scheduled below this point.
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  // Return SU to the available pool.
  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
334
Evan Chengcfd5f822007-09-27 00:25:29 +0000335// FIXME: This is probably too slow!
336static void isReachable(SUnit *SU, SUnit *TargetSU,
337 SmallPtrSet<SUnit*, 32> &Visited, bool &Reached) {
338 if (Reached) return;
339 if (SU == TargetSU) {
340 Reached = true;
341 return;
342 }
343 if (!Visited.insert(SU)) return;
344
345 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E;
346 ++I)
347 isReachable(I->Dep, TargetSU, Visited, Reached);
348}
349
350static bool isReachable(SUnit *SU, SUnit *TargetSU) {
351 SmallPtrSet<SUnit*, 32> Visited;
352 bool Reached = false;
353 isReachable(SU, TargetSU, Visited, Reached);
354 return Reached;
355}
356
/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
static bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
  // A direct back-path from TargetSU to SU would close a cycle.
  if (isReachable(TargetSU, SU))
    return true;
  // Likewise a path from TargetSU to any physical-register predecessor of SU
  // (those edges cannot be broken, so they count as part of SU).
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->Cost < 0 && isReachable(TargetSU, I->Dep))
      return true;
  return false;
}
368
Evan Cheng8e136a92007-09-26 21:36:17 +0000369/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
Evan Cheng5924bf72007-09-25 01:54:36 +0000370/// BTCycle in order to schedule a specific node. Returns the last unscheduled
371/// SUnit. Also returns if a successor is unscheduled in the process.
Evan Cheng8e136a92007-09-26 21:36:17 +0000372void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
373 unsigned &CurCycle) {
Evan Cheng5924bf72007-09-25 01:54:36 +0000374 SUnit *OldSU = NULL;
Evan Cheng8e136a92007-09-26 21:36:17 +0000375 while (CurCycle > BtCycle) {
Evan Cheng5924bf72007-09-25 01:54:36 +0000376 OldSU = Sequence.back();
377 Sequence.pop_back();
378 if (SU->isSucc(OldSU))
Evan Cheng8e136a92007-09-26 21:36:17 +0000379 // Don't try to remove SU from AvailableQueue.
380 SU->isAvailable = false;
Evan Cheng5924bf72007-09-25 01:54:36 +0000381 UnscheduleNodeBottomUp(OldSU);
382 --CurCycle;
383 }
384
385
386 if (SU->isSucc(OldSU)) {
387 assert(false && "Something is wrong!");
388 abort();
389 }
Evan Cheng1ec79b42007-09-27 07:09:03 +0000390
391 ++NumBacktracks;
Evan Cheng5924bf72007-09-25 01:54:36 +0000392}
393
394/// isSafeToCopy - True if the SUnit for the given SDNode can safely cloned,
395/// i.e. the node does not produce a flag, it does not read a flag and it does
396/// not have an incoming chain.
397static bool isSafeToCopy(SDNode *N) {
Evan Cheng8e136a92007-09-26 21:36:17 +0000398 if (!N)
399 return true;
400
Evan Cheng5924bf72007-09-25 01:54:36 +0000401 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
402 if (N->getValueType(i) == MVT::Flag)
403 return false;
404 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
405 const SDOperand &Op = N->getOperand(i);
406 MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
407 if (VT == MVT::Other || VT == MVT::Flag)
408 return false;
409 }
410
411 return true;
412}
413
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";

  SUnit *NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      NewSU->addPred(I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  // Edge removal is deferred to a second loop: removing while iterating
  // would invalidate the succ_iterator.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
      I->Dep->addPred(NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    Succ->removePred(SU, isCtrl, false);
  }

  // Let the priority queue know about the changed/new nodes.
  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
454
/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
/// and move all scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                               SmallVector<SUnit*, 2> &Copies) {
  // CopyFromSU copies Reg out of SrcRC into DestRC; CopyToSU copies it back.
  // Both are synthetic SUnits with no SDNode (NewSUnit(NULL)).
  SUnit *CopyFromSU = NewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  SUnit *CopyToSU = NewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  // Removal is deferred to a second loop so the iterator stays valid.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
      I->Dep->addPred(CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    Succ->removePred(SU, isCtrl, false);
  }

  // Chain SU -> CopyFromSU -> CopyToSU through Reg; the -1/+1 costs mark the
  // physical register dependency direction.
  CopyFromSU->addPred(SU, false, false, Reg, -1);
  CopyToSU->addPred(CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumCCCopies;
}
501
502/// getPhysicalRegisterVT - Returns the ValueType of the physical register
503/// definition of the specified node.
504/// FIXME: Move to SelectionDAG?
505static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
506 const TargetInstrInfo *TII) {
507 const TargetInstrDescriptor &TID = TII->get(N->getTargetOpcode());
508 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
509 unsigned NumRes = TID.numDefs;
510 for (const unsigned *ImpDef = TID.ImplicitDefs; *ImpDef; ++ImpDef) {
511 if (Reg == *ImpDef)
512 break;
513 ++NumRes;
514 }
515 return N->getValueType(NumRes);
516}
517
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
/// The offending registers are appended to LRegs (each at most once).
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs){
  if (LiveRegs.empty())
    return false;

  // RegAdded prevents the same register from being pushed to LRegs twice.
  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  // First check physical register uses (negative-cost pred edges): using a
  // live reg is only OK if the live def is the edge's own source.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0)  {
      unsigned Reg = I->Reg;
      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
        if (RegAdded.insert(Reg))
          LRegs.push_back(Reg);
      }
      // Aliasing registers conflict just the same.
      for (const unsigned *Alias = MRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }

  // Then check implicit defs of SU's node and all nodes flag-glued to it.
  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
    if (!Node || !Node->isTargetOpcode())
      continue;
    const TargetInstrDescriptor &TID = TII->get(Node->getTargetOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
        if (RegAdded.insert(*Reg))
          LRegs.push_back(*Reg);
      }
      for (const unsigned *Alias = MRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }
  return !LRegs.empty();
}
568
Evan Cheng1ec79b42007-09-27 07:09:03 +0000569
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
  RootSU->isAvailable = true;
  AvailableQueue->push(RootSU);

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    // Maps each delayed SUnit to the live physical regs it would clobber.
    DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
    // Pop candidates until one is ready (cycle bound met and no live
    // physical reg conflict); the rest are parked in NotReady.
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle) {
        SmallVector<unsigned, 4> LRegs;
        if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
          break;
        Delayed = true;
        LRegsMap.insert(std::make_pair(CurSU, LRegs));
      }

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          TrySU->addPred(OldSU, true, true);
          // If one or more successors has been unscheduled, then the current
          // node is no longer available. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack. Try duplicating the nodes that produce these
        // "expensive to copy" values to break the dependency. In case even
        // that doesn't work, insert cross class copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        SUnit *NewDef;
        if (isSafeToCopy(LRDef->Node))
          NewDef = CopyAndMoveSuccessors(LRDef);
        else {
          // Issue expensive cross register class copies.
          MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
          const TargetRegisterClass *RC =
            MRI->getPhysicalRegisterRegClass(VT, Reg);
          const TargetRegisterClass *DestRC = MRI->getCrossCopyRegClass(RC);
          if (!DestRC) {
            assert(false && "Don't know how to copy this physical register!");
            abort();
          }
          SmallVector<SUnit*, 2> Copies;
          InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU # " << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          TrySU->addPred(Copies.front(), true, true);
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU # " << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        LiveRegDefs[Reg] = NewDef;
        NewDef->addPred(TrySU, true, true);
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      if (!CurSU) {
        assert(false && "Unable to resolve live physical register dependencies!");
        abort();
      }
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    // A null CurSU means nothing could issue this cycle: emit a noop slot.
    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Add entry node last
  if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
    SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
    Sequence.push_back(Entry);
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());
  
  
#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}
727
728//===----------------------------------------------------------------------===//
729// Top-Down Scheduling
730//===----------------------------------------------------------------------===//
731
732/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
Dan Gohman54a187e2007-08-20 19:28:38 +0000733/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
Evan Chengd38c22b2006-05-11 23:55:42 +0000734void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
735 unsigned CurCycle) {
736 // FIXME: the distance between two nodes is not always == the predecessor's
737 // latency. For example, the reader can very well read the register written
738 // by the predecessor later than the issue cycle. It also depends on the
739 // interrupt model (drain vs. freeze).
740 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
741
742 if (!isChain)
Evan Cheng5924bf72007-09-25 01:54:36 +0000743 --SuccSU->NumPredsLeft;
Evan Chengd38c22b2006-05-11 23:55:42 +0000744 else
Evan Cheng5924bf72007-09-25 01:54:36 +0000745 --SuccSU->NumChainPredsLeft;
Evan Chengd38c22b2006-05-11 23:55:42 +0000746
747#ifndef NDEBUG
748 if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) {
Bill Wendling22e978a2006-12-07 20:04:42 +0000749 cerr << "*** List scheduling failed! ***\n";
Evan Chengd38c22b2006-05-11 23:55:42 +0000750 SuccSU->dump(&DAG);
Bill Wendling22e978a2006-12-07 20:04:42 +0000751 cerr << " has been released too many times!\n";
Evan Chengd38c22b2006-05-11 23:55:42 +0000752 assert(0);
753 }
754#endif
755
756 if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
757 SuccSU->isAvailable = true;
758 AvailableQueue->push(SuccSU);
759 }
760}
761
762
763/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
764/// count of its successors. If a successor pending count is zero, add it to
765/// the Available queue.
Evan Chengd12c97d2006-05-30 18:05:39 +0000766void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
Bill Wendling22e978a2006-12-07 20:04:42 +0000767 DOUT << "*** Scheduling [" << CurCycle << "]: ";
Evan Chengd38c22b2006-05-11 23:55:42 +0000768 DEBUG(SU->dump(&DAG));
769 SU->Cycle = CurCycle;
770
771 AvailableQueue->ScheduledNode(SU);
Evan Chengd38c22b2006-05-11 23:55:42 +0000772
773 // Top down: release successors
Chris Lattnerd86418a2006-08-17 00:09:56 +0000774 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
775 I != E; ++I)
Evan Cheng0effc3a2007-09-19 01:38:40 +0000776 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
Evan Chengd38c22b2006-05-11 23:55:42 +0000777 SU->isScheduled = true;
Evan Chengd38c22b2006-05-11 23:55:42 +0000778}
779
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;
  // The entry node's SUnit; it is always emitted first.
  SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }
  
  // Emit the entry node first.
  ScheduleNodeTopDown(Entry, CurCycle);
  Sequence.push_back(Entry);
  ++CurCycle;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  std::vector<SUnit*> NotReady;
  while (!AvailableQueue->empty()) {
    // Pop until we find a node whose cycle bound has been reached; nodes
    // popped too early are stashed in NotReady for this iteration.
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU && CurSU->CycleBound > CurCycle) {
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }
    
    // Add the nodes that aren't ready back onto the available list.
    AvailableQueue->push_all(NotReady);
    NotReady.clear();

    // A null CurSU means nothing could be issued this cycle; record a null
    // entry in the sequence so the cycle still advances.
    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeTopDown(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    CurCycle++;
  }
  
  
#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}
839
840
841
842//===----------------------------------------------------------------------===//
843// RegReductionPriorityQueue Implementation
844//===----------------------------------------------------------------------===//
845//
846// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
847// to reduce register pressure.
848//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;
  
  /// Sorting functions for the Available queue.
  // bu_ls_rr_sort - Comparator for the bottom-up scheduler's priority queue;
  // node priorities are obtained from the owning queue via SPQ.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;  // back-pointer, not owned
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
    
    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  // td_ls_rr_sort - Comparator for the top-down scheduler's priority queue.
  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;  // back-pointer, not owned
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
    
    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace
870
Evan Cheng961bbd32007-01-08 23:50:38 +0000871static inline bool isCopyFromLiveIn(const SUnit *SU) {
872 SDNode *N = SU->Node;
Evan Cheng8e136a92007-09-26 21:36:17 +0000873 return N && N->getOpcode() == ISD::CopyFromReg &&
Evan Cheng961bbd32007-01-08 23:50:38 +0000874 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
875}
876
Evan Chengd38c22b2006-05-11 23:55:42 +0000877namespace {
  /// RegReductionPriorityQueue - Common base of the register-pressure
  /// reduction priority queues: a std::priority_queue of SUnits ordered by
  /// the SF comparator.  Subclasses override the virtual hooks to compute
  /// actual node priorities.
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
   : public SchedulingPriorityQueue {
    // Queue - The underlying heap of available SUnits.  The comparator is
    // constructed with a back-pointer to this queue.
    std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;

  public:
    RegReductionPriorityQueue() :
    Queue(SF(this)) {}
    
    // initNodes - Called once per region; subclasses record the node map /
    // unit list and precompute priorities here.
    virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                           std::vector<SUnit> &sunits) {}

    // addNode - Called when a new SUnit is created during scheduling.
    virtual void addNode(const SUnit *SU) {}

    // updateNode - Called when an existing SUnit has been changed.
    virtual void updateNode(const SUnit *SU) {}

    // releaseState - Called after scheduling to drop any cached state.
    virtual void releaseState() {}
    
    // getNodePriority - Base implementation: every node has priority 0.
    virtual unsigned getNodePriority(const SUnit *SU) const {
      return 0;
    }
    
    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }
    
    void push(SUnit *U) {
      Queue.push(U);
    }
    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        Queue.push(Nodes[i]);
    }
    
    // pop - Remove and return the highest-priority node, or NULL if empty.
    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      return V;
    }

    /// remove - This is a really inefficient way to remove a node from a
    /// priority queue. We should roll our own heap to make this better or
    /// something.
    void remove(SUnit *SU) {
      std::vector<SUnit*> Temp;
      
      assert(!Queue.empty() && "Not in queue!");
      // Pop elements into Temp until SU surfaces at the top of the heap.
      while (Queue.top() != SU) {
        Temp.push_back(Queue.top());
        Queue.pop();
        assert(!Queue.empty() && "Not in queue!");
      }

      // Remove the node from the PQ.
      Queue.pop();
      
      // Add all the other nodes back.
      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
        Queue.push(Temp[i]);
    }
  };
940
  /// BURegReductionPriorityQueue - Bottom-up queue: priorities are
  /// Sethi-Ullman numbers, with special cases for register copies and for
  /// nodes with no uses or no defs.
  template<class SF>
  class VISIBILITY_HIDDEN BURegReductionPriorityQueue
   : public RegReductionPriorityQueue<SF> {
    // SUnitMap SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;
    
    // SethiUllmanNumbers - The SethiUllman number for each node.
    // 0 doubles as the "not yet computed" sentinel.
    std::vector<unsigned> SethiUllmanNumbers;

    // TII - Used to query operand constraints (tied defs) of target nodes.
    const TargetInstrInfo *TII;
  public:
    explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii)
      : TII(tii) {}

    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      // Grow the table (new slots get the "uncomputed" value 0) and compute
      // the new node's number.
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    void updateNode(const SUnit *SU) {
      // Invalidate the cached number, then recompute it.
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices. But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      else if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that would
        // be consumed (e.g. store), then it terminates a chain of computation.
        // Give it a large SethiUllman number so it will be scheduled right
        // before its predecessors that it doesn't lengthen their live ranges.
        return 0xffff;
      else if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      else
        return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    bool canClobber(SUnit *SU, SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };
1015
1016
  /// TDRegReductionPriorityQueue - Top-down queue: priorities are plain
  /// Sethi-Ullman numbers with no bottom-up special cases.
  template<class SF>
  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
   : public RegReductionPriorityQueue<SF> {
    // SUnitMap SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;
    
    // SethiUllmanNumbers - The SethiUllman number for each node.
    // 0 doubles as the "not yet computed" sentinel.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    TDRegReductionPriorityQueue() {}

    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      // Grow the table (new slots start uncomputed) and compute SU's number.
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    void updateNode(const SUnit *SU) {
      // Invalidate the cached number, then recompute it.
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };
1064}
1065
Evan Chengb9e3db62007-03-14 22:43:40 +00001066/// closestSucc - Returns the scheduled cycle of the successor which is
1067/// closet to the current cycle.
Evan Cheng28748552007-03-13 23:25:11 +00001068static unsigned closestSucc(const SUnit *SU) {
1069 unsigned MaxCycle = 0;
1070 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
Evan Chengb9e3db62007-03-14 22:43:40 +00001071 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001072 unsigned Cycle = I->Dep->Cycle;
Evan Chengb9e3db62007-03-14 22:43:40 +00001073 // If there are bunch of CopyToRegs stacked up, they should be considered
1074 // to be at the same position.
Evan Cheng8e136a92007-09-26 21:36:17 +00001075 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
Evan Cheng0effc3a2007-09-19 01:38:40 +00001076 Cycle = closestSucc(I->Dep)+1;
Evan Chengb9e3db62007-03-14 22:43:40 +00001077 if (Cycle > MaxCycle)
1078 MaxCycle = Cycle;
1079 }
Evan Cheng28748552007-03-13 23:25:11 +00001080 return MaxCycle;
1081}
1082
Evan Chengb9e3db62007-03-14 22:43:40 +00001083/// calcMaxScratches - Returns an cost estimate of the worse case requirement
1084/// for scratch registers. Live-in operands and live-out results don't count
1085/// since they are "fixed".
1086static unsigned calcMaxScratches(const SUnit *SU) {
1087 unsigned Scratches = 0;
1088 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1089 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001090 if (I->isCtrl) continue; // ignore chain preds
Evan Cheng8e136a92007-09-26 21:36:17 +00001091 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
Evan Chengb9e3db62007-03-14 22:43:40 +00001092 Scratches++;
1093 }
1094 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1095 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001096 if (I->isCtrl) continue; // ignore chain succs
Evan Cheng8e136a92007-09-26 21:36:17 +00001097 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
Evan Chengb9e3db62007-03-14 22:43:40 +00001098 Scratches += 10;
1099 }
1100 return Scratches;
1101}
1102
// Bottom up
// Note: std::priority_queue pops the *greatest* element under this
// comparator, so returning true here means `left` ranks below `right`,
// i.e. `right` is scheduled first.
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  // There used to be a special tie breaker here that looked for
  // two-address instructions and preferred the instruction with a
  // def&use operand. The special case triggered diagnostics when
  // _GLIBCXX_DEBUG was enabled because it broke the strict weak
  // ordering that priority_queue requires. It didn't help much anyway
  // because AddPseudoTwoAddrDeps already covers many of the cases
  // where it would have applied. In addition, it's counter-intuitive
  // that a tie breaker would be the first thing attempted. There's a
  // "real" tie breaker below that is the operation of last resort.
  // The fact that the "special tie breaker" would trigger when there
  // wasn't otherwise a tie is what broke the strict weak ordering
  // constraint.

  // Primary key: the Sethi-Ullman-based priority supplied by the queue.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority > RPriority)
    return true;
  else if (LPriority == RPriority) {
    // Try schedule def + use closer when Sethi-Ullman numbers are the same.
    // e.g.
    // t1 = op t2, c1
    // t3 = op t4, c2
    //
    // and the following instructions are both ready.
    // t2 = op c3
    // t4 = op c4
    //
    // Then schedule t2 = op first.
    // i.e.
    // t4 = op c4
    // t2 = op c3
    // t1 = op t2, c1
    // t3 = op t4, c2
    //
    // This creates more short live intervals.
    unsigned LDist = closestSucc(left);
    unsigned RDist = closestSucc(right);
    if (LDist < RDist)
      return true;
    else if (LDist == RDist) {
      // Intuitively, it's good to push down instructions whose results are
      // liveout so their long live ranges won't conflict with other values
      // which are needed inside the BB. Further prioritize liveout instructions
      // by the number of operands which are calculated within the BB.
      unsigned LScratch = calcMaxScratches(left);
      unsigned RScratch = calcMaxScratches(right);
      if (LScratch > RScratch)
        return true;
      else if (LScratch == RScratch)
        // Last-resort tie breakers: height, then depth, then cycle bound.
        if (left->Height > right->Height)
          return true;
        else if (left->Height == right->Height)
          if (left->Depth < right->Depth)
            return true;
          else if (left->Depth == right->Depth)
            if (left->CycleBound > right->CycleBound)
              return true;
    }
  }
  return false;
}
1166
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001167template<class SF>
1168bool BURegReductionPriorityQueue<SF>::canClobber(SUnit *SU, SUnit *Op) {
1169 if (SU->isTwoAddress) {
1170 unsigned Opc = SU->Node->getTargetOpcode();
Evan Cheng100c8d62007-09-13 00:06:00 +00001171 unsigned NumRes = TII->getNumDefs(Opc);
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001172 unsigned NumOps = ScheduleDAG::CountOperands(SU->Node);
1173 for (unsigned i = 0; i != NumOps; ++i) {
Evan Cheng67fc1412006-12-01 21:52:58 +00001174 if (TII->getOperandConstraint(Opc, i+NumRes, TOI::TIED_TO) != -1) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001175 SDNode *DU = SU->Node->getOperand(i).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +00001176 if (Op == (*SUnitMap)[DU][SU->InstanceNo])
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001177 return true;
1178 }
1179 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001180 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001181 return false;
1182}
1183
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001184
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand. Add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule).
template<class SF>
void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = (SUnit *)&((*SUnits)[i]);
    // Only two-address target instructions are of interest here.
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->Node;
    if (!Node || !Node->isTargetOpcode())
      continue;

    unsigned Opc = Node->getTargetOpcode();
    unsigned NumRes = TII->getNumDefs(Opc);
    unsigned NumOps = ScheduleDAG::CountOperands(Node);
    for (unsigned j = 0; j != NumOps; ++j) {
      // Look for operands that are tied to a def (the def&use operands).
      if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->Node->getOperand(j).Val;
        SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
        if (!DUSU) continue;
        // Walk the other (data) users of the shared value.
        for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
             I != E; ++I) {
          if (I->isCtrl) continue;
          SUnit *SuccSU = I->Dep;
          // Don't constraint nodes with implicit defs. It can create cycles
          // plus it may increase register pressures.
          if (SuccSU == SU || SuccSU->hasImplicitDefs)
            continue;
          // Be conservative. Ignore if nodes aren't at the same depth.
          if (SuccSU->Depth != SU->Depth)
            continue;
          // Add the pseudo edge SuccSU -> SU only if it cannot close a cycle
          // (the isReachable check).  NOTE(review): the first half of the
          // condition skips users that would themselves clobber the value,
          // unless commutability favors the other node — confirm intent.
          if ((!canClobber(SuccSU, DUSU) ||
               (!SU->isCommutable && SuccSU->isCommutable)) &&
              !isReachable(SuccSU, SU)) {
            DOUT << "Adding an edge from SU # " << SU->NodeNum
                 << " to SU #" << SuccSU->NodeNum << "\n";
            SU->addPred(SuccSU, true, true);
          }
        }
      }
    }
  }
}
1231
Evan Cheng6730f032007-01-08 23:55:53 +00001232/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
Evan Chengd38c22b2006-05-11 23:55:42 +00001233/// Smaller number is the higher priority.
1234template<class SF>
Chris Lattner296a83c2007-02-01 04:55:59 +00001235unsigned BURegReductionPriorityQueue<SF>::
1236CalcNodeSethiUllmanNumber(const SUnit *SU) {
Evan Cheng961bbd32007-01-08 23:50:38 +00001237 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +00001238 if (SethiUllmanNumber != 0)
1239 return SethiUllmanNumber;
1240
Evan Cheng961bbd32007-01-08 23:50:38 +00001241 unsigned Extra = 0;
1242 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1243 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001244 if (I->isCtrl) continue; // ignore chain preds
1245 SUnit *PredSU = I->Dep;
Evan Cheng6730f032007-01-08 23:55:53 +00001246 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
Evan Cheng961bbd32007-01-08 23:50:38 +00001247 if (PredSethiUllman > SethiUllmanNumber) {
1248 SethiUllmanNumber = PredSethiUllman;
1249 Extra = 0;
Evan Cheng0effc3a2007-09-19 01:38:40 +00001250 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
Evan Cheng5924bf72007-09-25 01:54:36 +00001251 ++Extra;
Evan Chengd38c22b2006-05-11 23:55:42 +00001252 }
Evan Cheng961bbd32007-01-08 23:50:38 +00001253
1254 SethiUllmanNumber += Extra;
1255
1256 if (SethiUllmanNumber == 0)
1257 SethiUllmanNumber = 1;
Evan Chengd38c22b2006-05-11 23:55:42 +00001258
1259 return SethiUllmanNumber;
1260}
1261
Evan Cheng6730f032007-01-08 23:55:53 +00001262/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1263/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001264template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001265void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001266 SethiUllmanNumbers.assign(SUnits->size(), 0);
1267
1268 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001269 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001270}
1271
1272static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
1273 unsigned Sum = 0;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001274 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1275 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001276 SUnit *SuccSU = I->Dep;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001277 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1278 EE = SuccSU->Preds.end(); II != EE; ++II) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001279 SUnit *PredSU = II->Dep;
Evan Chengd38c22b2006-05-11 23:55:42 +00001280 if (!PredSU->isScheduled)
Evan Cheng5924bf72007-09-25 01:54:36 +00001281 ++Sum;
Evan Chengd38c22b2006-05-11 23:55:42 +00001282 }
1283 }
1284
1285 return Sum;
1286}
1287
1288
// Top down
// Note: std::priority_queue pops the *greatest* element under this
// comparator, so returning true means `right` is scheduled first.
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->Node && left->Node->isTargetOpcode();
  bool RIsTarget = right->Node && right->Node->isTargetOpcode();
  // "Floaters" are target instructions with no predecessors at all.
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  // Bonus when all of a node's successors are one unscheduled pred from ready.
  unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
  unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;

  // Nodes with successors always rank above nodes without.
  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  // Special tie breaker: if two nodes share a operand, the one that use it
  // as a def&use operand is preferred.
  if (LIsTarget && RIsTarget) {
    if (left->isTwoAddress && !right->isTwoAddress) {
      SDNode *DUNode = left->Node->getOperand(0).Val;
      if (DUNode->isOperand(right->Node))
        RBonus += 2;
    }
    if (!left->isTwoAddress && right->isTwoAddress) {
      SDNode *DUNode = right->Node->getOperand(0).Val;
      if (DUNode->isOperand(left->Node))
        LBonus += 2;
    }
  }
  // NOTE(review): the bonuses are unsigned, so the floater decrement below
  // wraps around when the bonus is 0 — confirm that is intentional.
  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  // NOTE(review): the equality chain below compares raw priorities without
  // the bonuses applied above — confirm this asymmetry is intentional.
  if (LPriority+LBonus < RPriority+RBonus)
    return true;
  else if (LPriority == RPriority)
    if (left->Depth < right->Depth)
      return true;
    else if (left->Depth == right->Depth)
      if (left->NumSuccsLeft > right->NumSuccsLeft)
        return true;
      else if (left->NumSuccsLeft == right->NumSuccsLeft)
        if (left->CycleBound > right->CycleBound)
          return true;
  return false;
}
1341
/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
/// Smaller number is the higher priority.
template<class SF>
unsigned TDRegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  // Already computed; 0 doubles as the "not yet computed" marker.
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // Copies/token factors get the maximum number so they sort last.
    SethiUllmanNumber = 0xffff;
  else if (SU->NumSuccsLeft == 0)
    // If SU does not have a use, i.e. it doesn't produce a value that would
    // be consumed (e.g. store), then it terminates a chain of computation.
    // Give it a small SethiUllman number so it will be scheduled right before
    // its predecessors that it doesn't lengthen their live ranges.
    // NOTE(review): 0 is also the "uncomputed" sentinel, so such nodes are
    // recomputed on every query — confirm that is acceptable.
    SethiUllmanNumber = 0;
  else if (SU->NumPredsLeft == 0 &&
           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
    SethiUllmanNumber = 0xffff;
  else {
    // Otherwise: maximum over data predecessors, plus one for each
    // additional predecessor tying the maximum.
    int Extra = 0;
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl) continue;  // ignore chain preds
      SUnit *PredSU = I->Dep;
      unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
      if (PredSethiUllman > SethiUllmanNumber) {
        SethiUllmanNumber = PredSethiUllman;
        Extra = 0;
      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
        ++Extra;
    }

    SethiUllmanNumber += Extra;
  }
  
  return SethiUllmanNumber;
}
1382
Evan Cheng6730f032007-01-08 23:55:53 +00001383/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1384/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001385template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001386void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001387 SethiUllmanNumbers.assign(SUnits->size(), 0);
1388
1389 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001390 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001391}
1392
1393//===----------------------------------------------------------------------===//
1394// Public Constructor Functions
1395//===----------------------------------------------------------------------===//
1396
Jim Laskey03593f72006-08-01 18:29:48 +00001397llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1398 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001399 MachineBasicBlock *BB) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001400 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
Jim Laskey95eda5b2006-08-01 14:21:23 +00001401 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001402 new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII));
Evan Chengd38c22b2006-05-11 23:55:42 +00001403}
1404
Jim Laskey03593f72006-08-01 18:29:48 +00001405llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1406 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001407 MachineBasicBlock *BB) {
Jim Laskey95eda5b2006-08-01 14:21:23 +00001408 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
Chris Lattner296a83c2007-02-01 04:55:59 +00001409 new TDRegReductionPriorityQueue<td_ls_rr_sort>());
Evan Chengd38c22b2006-05-11 23:55:42 +00001410}
1411