blob: 61a005ce10ed6342587dfc81cc63f2e44f2ca235 [file] [log] [blame]
//===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Evan Cheng and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
17
Dale Johannesen2182f062007-07-13 17:13:54 +000018#define DEBUG_TYPE "pre-RA-sched"
Evan Chengd38c22b2006-05-11 23:55:42 +000019#include "llvm/CodeGen/ScheduleDAG.h"
Jim Laskey29e635d2006-08-02 12:30:23 +000020#include "llvm/CodeGen/SchedulerRegistry.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000021#include "llvm/CodeGen/SSARegMap.h"
22#include "llvm/Target/MRegisterInfo.h"
Owen Anderson8c2c1e92006-05-12 06:33:49 +000023#include "llvm/Target/TargetData.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000024#include "llvm/Target/TargetMachine.h"
25#include "llvm/Target/TargetInstrInfo.h"
26#include "llvm/Support/Debug.h"
Chris Lattner3d27be12006-08-27 12:54:02 +000027#include "llvm/Support/Compiler.h"
Evan Cheng5924bf72007-09-25 01:54:36 +000028#include "llvm/ADT/SmallSet.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000029#include "llvm/ADT/Statistic.h"
30#include <climits>
Evan Chengd38c22b2006-05-11 23:55:42 +000031#include <queue>
32#include "llvm/Support/CommandLine.h"
33using namespace llvm;
34
// Register both register-reduction list schedulers with the scheduler
// registry so they can be selected by name (see SchedulerRegistry.h).
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "  Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "  Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
43
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  /// Owned by this scheduler and deleted in the destructor.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their definition
  /// that are "live". These nodes must be scheduled before any other nodes that
  /// modifies the registers can be scheduled.
  /// LiveRegCycles records the cycle associated with each live register.
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue) {
    }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

private:
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  SUnit *InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*);
  bool DelayForLiveRegsBottomUp(SUnit*, unsigned&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();
};
}  // end anonymous namespace
98
99
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Size the live-physical-register tables; one slot per register.
  LiveRegDefs.resize(MRI->getNumRegs(), NULL);
  LiveRegCycles.resize(MRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  CalculateDepths();
  CalculateHeights();

  AvailableQueue->initNodes(SUnitMap, SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  // Opportunistically commute two-address nodes to reduce register pressure.
  CommuteNodesToReducePressure();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";

  // Emit in scheduled order
  EmitSchedule();
}
134
/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to a MI.
/// Walks the final Sequence bottom-up, tracking which operand SUnits have a
/// use below the current position (OperandSeen).
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size()-1; i != 0; --i) {  // Ignore first node.
    SUnit *SU = Sequence[i];
    // Sequence may contain noop (NULL) slots or SUnits with no SDNode.
    if (!SU || !SU->Node) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->Node->getTargetOpcode();
      unsigned NumRes = TII->getNumDefs(Opc);
      unsigned NumOps = CountOperands(SU->Node);
      for (unsigned j = 0; j != NumOps; ++j) {
        // Only operands tied to a result are of interest.
        if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->Node->getOperand(j).Val;
        SUnit *OpSU = SUnitMap[OpN][SU->InstanceNo];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU. Try to commute SU if no other source operands
          // are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->Node->getOperand(k).Val;
              OpSU = SUnitMap[OpN][SU->InstanceNo];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->Node);
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    // Record SU's data (non-control) operands as "seen below" for the
    // remainder of the bottom-up walk.
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep);
    }
  }
}
Evan Chengd38c22b2006-05-11 23:55:42 +0000184
185//===----------------------------------------------------------------------===//
186// Bottom-Up Scheduling
187//===----------------------------------------------------------------------===//
188
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  // Chain (control) successors are counted separately from data successors.
  if (!isChain)
    --PredSU->NumSuccsLeft;
  else
    --PredSU->NumChainSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // Once every successor has been scheduled, the predecessor becomes
  // available.
  if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
    // EntryToken has to go last!  Special case it here.
    if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) {
      PredSU->isAvailable = true;
      AvailableQueue->push(PredSU);
    }
  }
}
221
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    // A negative edge cost marks a physical register dependency.
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      // Clear the live-reg bookkeeping once the recorded cycle matches the
      // successor's cycle (i.e. the use that made the register live).
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}
264
Evan Cheng5924bf72007-09-25 01:54:36 +0000265/// CapturePred - This does the opposite of ReleasePred. Since SU is being
266/// unscheduled, incrcease the succ left count of its predecessors. Remove
267/// them from AvailableQueue if necessary.
268void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
269 PredSU->CycleBound = 0;
270 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
271 I != E; ++I) {
272 if (I->Dep == SU)
273 continue;
274 PredSU->CycleBound = std::max(PredSU->CycleBound,
275 I->Dep->Cycle + PredSU->Latency);
276 }
277
278 if (PredSU->isAvailable) {
279 PredSU->isAvailable = false;
280 if (!PredSU->isPending)
281 AvailableQueue->remove(PredSU);
282 }
283
284 if (!isChain)
285 ++PredSU->NumSuccsLeft;
286 else
287 ++PredSU->NumChainSuccsLeft;
288}
289
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  // Give each predecessor back one successor, and undo the live physical
  // register bookkeeping that scheduling SU established.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  // Physical registers defined by SU become live again: their scheduled
  // successors (uses) are still in the schedule.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      // Keep the earliest cycle among the remaining uses.
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
328
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.  Unschedules one node per
/// cycle from the tail of Sequence and updates CurCycle accordingly.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  // After backtracking all the way to BtCycle, SU must not still depend on
  // the last unscheduled node; otherwise backtracking cannot help.
  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }
}
351
352/// isSafeToCopy - True if the SUnit for the given SDNode can safely cloned,
353/// i.e. the node does not produce a flag, it does not read a flag and it does
354/// not have an incoming chain.
355static bool isSafeToCopy(SDNode *N) {
Evan Cheng8e136a92007-09-26 21:36:17 +0000356 if (!N)
357 return true;
358
Evan Cheng5924bf72007-09-25 01:54:36 +0000359 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
360 if (N->getValueType(i) == MVT::Flag)
361 return false;
362 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
363 const SDOperand &Op = N->getOperand(i);
364 MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
365 if (VT == MVT::Other || VT == MVT::Flag)
366 return false;
367 }
368
369 return true;
370}
371
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";

  SUnit *NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      NewSU->addPred(I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<SDep*, 2> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
    if (I->Dep->isScheduled) {
      I->Dep->addPred(NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(I);
    }
  }
  // Removal is deferred so the Succs list is not mutated while iterating it.
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i]->Dep;
    bool isCtrl = DelDeps[i]->isCtrl;
    Succ->removePred(SU, isCtrl, false);
  }

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  return NewSU;
}
411
/// InsertCopiesAndMoveSuccs - Insert expensive cross register class copies and
/// move all scheduled successors of the given SUnit to the last copy.
SUnit *ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                             const TargetRegisterClass *DestRC,
                                             const TargetRegisterClass *SrcRC) {
  // Copy out of SrcRC into DestRC... (both copy SUnits have no SDNode; the
  // register classes tell the emitter what copy to issue.)
  SUnit *CopyFromSU = NewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  // ...and back from DestRC into SrcRC.
  SUnit *CopyToSU = NewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<SDep*, 2> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
    if (I->Dep->isScheduled) {
      I->Dep->addPred(CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(I);
    }
  }
  // Removal is deferred so the Succs list is not mutated while iterating it.
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i]->Dep;
    bool isCtrl = DelDeps[i]->isCtrl;
    Succ->removePred(SU, isCtrl, false);
  }

  // Chain SU -> CopyFrom -> CopyTo; the -1/+1 costs mark the physical
  // register legs of the copy.
  CopyFromSU->addPred(SU, false, false, Reg, -1);
  CopyToSU->addPred(CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);

  return CopyToSU;
}
455
456/// getPhysicalRegisterVT - Returns the ValueType of the physical register
457/// definition of the specified node.
458/// FIXME: Move to SelectionDAG?
459static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
460 const TargetInstrInfo *TII) {
461 const TargetInstrDescriptor &TID = TII->get(N->getTargetOpcode());
462 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
463 unsigned NumRes = TID.numDefs;
464 for (const unsigned *ImpDef = TID.ImplicitDefs; *ImpDef; ++ImpDef) {
465 if (Reg == *ImpDef)
466 break;
467 ++NumRes;
468 }
469 return N->getValueType(NumRes);
470}
471
472// FIXME: This is probably too slow!
473static void isReachable(SUnit *SU, SUnit *TargetSU,
474 SmallPtrSet<SUnit*, 32> &Visited, bool &Reached) {
475 if (Reached) return;
476 if (SU == TargetSU) {
477 Reached = true;
478 return;
479 }
480 if (!Visited.insert(SU)) return;
481
482 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E;
483 ++I)
484 isReachable(I->Dep, TargetSU, Visited, Reached);
485}
486
487static bool isReachable(SUnit *SU, SUnit *TargetSU) {
488 SmallPtrSet<SUnit*, 32> Visited;
489 bool Reached = false;
490 isReachable(SU, TargetSU, Visited, Reached);
491 return Reached;
492}
493
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, unsigned &CurCycle){
  if (LiveRegs.empty())
    return false;

  // If this node would clobber any "live" register, then it's not ready.
  // However, if this is the last "available" node, then we may have to
  // backtrack.
  bool MustSched = AvailableQueue->empty();
  SmallVector<unsigned, 4> LRegs;
  // Collect live registers (or aliases) that SU's physical-register preds
  // would clobber, i.e. ones whose current definer is not the pred itself.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      unsigned Reg = I->Reg;
      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep)
        LRegs.push_back(Reg);
      for (const unsigned *Alias = MRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep)
          LRegs.push_back(*Alias);
    }
  }

  // Also check the implicit defs of SU and its flagged nodes against the
  // live register set.
  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
    if (!Node || !Node->isTargetOpcode())
      continue;
    const TargetInstrDescriptor &TID = TII->get(Node->getTargetOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU)
        LRegs.push_back(*Reg);
      for (const unsigned *Alias = MRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU)
          LRegs.push_back(*Alias);
    }
  }

  if (MustSched && !LRegs.empty()) {
    // We have made a mistake by scheduling some nodes too early. Now we must
    // schedule the current node which will end up clobbering some live
    // registers that are expensive / impossible to copy. Try unscheduling
    // up to the point where it's safe to schedule the current node.
    unsigned LiveCycle = CurCycle;
    for (unsigned i = 0, e = LRegs.size(); i != e; ++i) {
      unsigned Reg = LRegs[i];
      unsigned LCycle = LiveRegCycles[Reg];
      LiveCycle = std::min(LiveCycle, LCycle);
    }

    SUnit *OldSU = Sequence[LiveCycle];
    if (!isReachable(Sequence[LiveCycle], SU)) {
      // If CycleBound is greater than backtrack cycle, then some of SU
      // successors are going to be unscheduled.
      bool SuccUnsched = SU->CycleBound > LiveCycle;
      BacktrackBottomUp(SU, LiveCycle, CurCycle);
      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (OldSU->isAvailable) {
        OldSU->isAvailable = false;
        AvailableQueue->remove(OldSU);
      }
      SU->addPred(OldSU, true, true);
      // If a successor has been unscheduled, then it's not possible to
      // schedule the current node.
      return SuccUnsched;
    } else {
      // Try duplicating the nodes that produces these "expensive to copy"
      // values to break the dependency.
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      SUnit *NewDef;
      if (isSafeToCopy(LRDef->Node))
        NewDef = CopyAndMoveSuccessors(LRDef);
      else {
        // Issue expensive cross register class copies.
        MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
        const TargetRegisterClass *RC =
          MRI->getPhysicalRegisterRegClass(VT, Reg);
        const TargetRegisterClass *DestRC = MRI->getCrossCopyRegClass(RC);
        if (!DestRC) {
          assert(false && "Don't know how to copy this physical register!");
          abort();
        }
        NewDef = InsertCopiesAndMoveSuccs(LRDef,Reg,DestRC,RC);
      }

      DOUT << "Adding an edge from SU # " << SU->NodeNum
           << " to SU #" << NewDef->NodeNum << "\n";
      LiveRegDefs[Reg] = NewDef;
      NewDef->addPred(SU, true, true);
      SU->isAvailable = false;
      AvailableQueue->push(NewDef);
      return true;
    }
  }
  return !LRegs.empty();
}
598
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
  RootSU->isAvailable = true;
  AvailableQueue->push(RootSU);

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();
    // Pop until we find a node that is within its cycle bound and is not
    // delayed by a live physical register dependency.
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle)
        if (!DelayForLiveRegsBottomUp(CurSU, CurCycle))
          break;

      // Verify node is still ready. It may not be in case the
      // scheduler has backtracked.
      if (CurSU->isAvailable) {
        CurSU->isPending = true;
        NotReady.push_back(CurSU);
      }
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // Backtracking may have made a deferred node unavailable; skip those.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    // A NULL CurSU means no node could issue this cycle: emit a noop slot.
    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Add entry node last
  if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
    SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
    Sequence.push_back(Entry);
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());


#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}
669
670//===----------------------------------------------------------------------===//
671// Top-Down Scheduling
672//===----------------------------------------------------------------------===//
673
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);

  // Chain (control) predecessors are counted separately from data
  // predecessors.
  if (!isChain)
    --SuccSU->NumPredsLeft;
  else
    --SuccSU->NumChainPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    SuccSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // Once every predecessor has been scheduled, the successor becomes
  // available.
  if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}
703
704
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  // Notify the priority queue before releasing successors so it sees the
  // node as scheduled.
  AvailableQueue->ScheduledNode(SU);

  // Top down: release successors
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
  SU->isScheduled = true;
}
721
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;
  SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // Emit the entry node first.
  ScheduleNodeTopDown(Entry, CurCycle);
  Sequence.push_back(Entry);
  ++CurCycle;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  std::vector<SUnit*> NotReady;
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();
    // Pop until we find a node whose cycle bound allows it to issue now.
    while (CurSU && CurSU->CycleBound > CurCycle) {
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    AvailableQueue->push_all(NotReady);
    NotReady.clear();

    // A NULL CurSU means no node could issue this cycle: emit a noop slot.
    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeTopDown(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    CurCycle++;
  }


#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
  }
  assert(!AnyNotSched);
#endif
}
781
782
783
784//===----------------------------------------------------------------------===//
785// RegReductionPriorityQueue Implementation
786//===----------------------------------------------------------------------===//
787//
788// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
789// to reduce register pressure.
790//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.  Each functor keeps a
  /// back-pointer (SPQ) to the priority queue that owns it, so operator()
  /// can query per-node priorities from the queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    // Bottom-up comparator; defined after the queue classes below.
    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  /// td_ls_rr_sort - Counterpart comparator for the top-down scheduler.
  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    // Top-down comparator; defined after the queue classes below.
    bool operator()(const SUnit* left, const SUnit* right) const;
  };
} // end anonymous namespace
812
Evan Cheng961bbd32007-01-08 23:50:38 +0000813static inline bool isCopyFromLiveIn(const SUnit *SU) {
814 SDNode *N = SU->Node;
Evan Cheng8e136a92007-09-26 21:36:17 +0000815 return N && N->getOpcode() == ISD::CopyFromReg &&
Evan Cheng961bbd32007-01-08 23:50:38 +0000816 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
817}
818
Evan Chengd38c22b2006-05-11 23:55:42 +0000819namespace {
  /// RegReductionPriorityQueue - Common base for the bottom-up and top-down
  /// register-pressure-reducing priority queues.  Wraps a std::priority_queue
  /// ordered by the sorting functor SF; subclasses override the virtual
  /// hooks to maintain per-node priorities.
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
   : public SchedulingPriorityQueue {
    // Queue - The underlying heap of available SUnits, ordered by SF, which
    // is constructed with a back-pointer to this queue.
    std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;

  public:
    RegReductionPriorityQueue() :
    Queue(SF(this)) {}

    // initNodes - Hook called once per region; base version does nothing.
    virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                           std::vector<SUnit> &sunits) {}

    // addNode - Hook called when a new SUnit is created mid-scheduling.
    virtual void addNode(const SUnit *SU) {}

    // updateNode - Hook called when an existing SUnit is modified.
    virtual void updateNode(const SUnit *SU) {}

    // releaseState - Hook called after scheduling to drop region state.
    virtual void releaseState() {}

    // getNodePriority - Base version gives every node equal priority.
    virtual unsigned getNodePriority(const SUnit *SU) const {
      return 0;
    }

    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push(U);
    }
    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        Queue.push(Nodes[i]);
    }

    // pop - Returns the highest-priority available node, or NULL if empty.
    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      return V;
    }

    /// remove - This is a really inefficient way to remove a node from a
    /// priority queue. We should roll our own heap to make this better or
    /// something.  (Pops elements into Temp until SU surfaces, drops SU,
    /// then pushes everything back: O(n log n) per removal.)
    void remove(SUnit *SU) {
      std::vector<SUnit*> Temp;

      assert(!Queue.empty() && "Not in queue!");
      while (Queue.top() != SU) {
        Temp.push_back(Queue.top());
        Queue.pop();
        assert(!Queue.empty() && "Not in queue!");
      }

      // Remove the node from the PQ.
      Queue.pop();

      // Add all the other nodes back.
      for (unsigned i = 0, e = Temp.size(); i != e; ++i)
        Queue.push(Temp[i]);
    }
  };
882
  /// BURegReductionPriorityQueue - Priority queue for the bottom-up
  /// scheduler.  Priorities are Sethi-Ullman numbers plus special cases for
  /// copies and leaf/root nodes (see getNodePriority).
  template<class SF>
  class VISIBILITY_HIDDEN BURegReductionPriorityQueue
   : public RegReductionPriorityQueue<SF> {
    // SUnitMap SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    // Zero is the "not yet computed" sentinel used by the memoization.
    std::vector<unsigned> SethiUllmanNumbers;

    // TII - Used to query two-address (TIED_TO) operand constraints.
    const TargetInstrInfo *TII;
  public:
    explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii)
      : TII(tii) {}

    // initNodes - Record the region's nodes, insert pseudo two-address
    // edges, and compute all priorities up front.
    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    // addNode - Grow the priority table and compute the new node's number.
    void addNode(const SUnit *SU) {
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    // updateNode - Reset to the "uncomputed" sentinel, then recompute.
    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    // getNodePriority - Returns the scheduling priority for SU.  Special
    // cases override the Sethi-Ullman number with extreme values (0xffff /
    // 0) to pull nodes toward their defs or uses.
    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices. But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      else if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that would
        // be consumed (e.g. store), then it terminates a chain of computation.
        // Give it a large SethiUllman number so it will be scheduled right
        // before its predecessors that it doesn't lengthen their live ranges.
        return 0xffff;
      else if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      else
        return SethiUllmanNumbers[SU->NodeNum];
    }

   private:
    bool canClobber(SUnit *SU, SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };
957
958
  /// TDRegReductionPriorityQueue - Priority queue for the top-down
  /// scheduler.  Unlike the bottom-up queue it adds no pseudo edges and
  /// uses the Sethi-Ullman number directly as the node priority.
  template<class SF>
  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
   : public RegReductionPriorityQueue<SF> {
    // SUnitMap SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    // Zero is the "not yet computed" sentinel used by the memoization.
    std::vector<unsigned> SethiUllmanNumbers;

   public:
    TDRegReductionPriorityQueue() {}

    // initNodes - Record the region's nodes and compute all priorities.
    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    // addNode - Grow the priority table and compute the new node's number.
    void addNode(const SUnit *SU) {
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    // updateNode - Reset to the "uncomputed" sentinel, then recompute.
    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      return SethiUllmanNumbers[SU->NodeNum];
    }

   private:
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };
1006}
1007
Evan Chengb9e3db62007-03-14 22:43:40 +00001008/// closestSucc - Returns the scheduled cycle of the successor which is
1009/// closet to the current cycle.
Evan Cheng28748552007-03-13 23:25:11 +00001010static unsigned closestSucc(const SUnit *SU) {
1011 unsigned MaxCycle = 0;
1012 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
Evan Chengb9e3db62007-03-14 22:43:40 +00001013 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001014 unsigned Cycle = I->Dep->Cycle;
Evan Chengb9e3db62007-03-14 22:43:40 +00001015 // If there are bunch of CopyToRegs stacked up, they should be considered
1016 // to be at the same position.
Evan Cheng8e136a92007-09-26 21:36:17 +00001017 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
Evan Cheng0effc3a2007-09-19 01:38:40 +00001018 Cycle = closestSucc(I->Dep)+1;
Evan Chengb9e3db62007-03-14 22:43:40 +00001019 if (Cycle > MaxCycle)
1020 MaxCycle = Cycle;
1021 }
Evan Cheng28748552007-03-13 23:25:11 +00001022 return MaxCycle;
1023}
1024
Evan Chengb9e3db62007-03-14 22:43:40 +00001025/// calcMaxScratches - Returns an cost estimate of the worse case requirement
1026/// for scratch registers. Live-in operands and live-out results don't count
1027/// since they are "fixed".
1028static unsigned calcMaxScratches(const SUnit *SU) {
1029 unsigned Scratches = 0;
1030 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1031 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001032 if (I->isCtrl) continue; // ignore chain preds
Evan Cheng8e136a92007-09-26 21:36:17 +00001033 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
Evan Chengb9e3db62007-03-14 22:43:40 +00001034 Scratches++;
1035 }
1036 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1037 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001038 if (I->isCtrl) continue; // ignore chain succs
Evan Cheng8e136a92007-09-26 21:36:17 +00001039 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
Evan Chengb9e3db62007-03-14 22:43:40 +00001040 Scratches += 10;
1041 }
1042 return Scratches;
1043}
1044
// Bottom up
// Returns true when 'left' should be ordered BELOW 'right' in the
// std::priority_queue, i.e. 'right' pops (schedules) first.
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  // There used to be a special tie breaker here that looked for
  // two-address instructions and preferred the instruction with a
  // def&use operand.  The special case triggered diagnostics when
  // _GLIBCXX_DEBUG was enabled because it broke the strict weak
  // ordering that priority_queue requires. It didn't help much anyway
  // because AddPseudoTwoAddrDeps already covers many of the cases
  // where it would have applied.  In addition, it's counter-intuitive
  // that a tie breaker would be the first thing attempted.  There's a
  // "real" tie breaker below that is the operation of last resort.
  // The fact that the "special tie breaker" would trigger when there
  // wasn't otherwise a tie is what broke the strict weak ordering
  // constraint.

  // Primary key: Sethi-Ullman-based priority (lower pops first).
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority > RPriority)
    return true;
  else if (LPriority == RPriority) {
    // Try schedule def + use closer when Sethi-Ullman numbers are the same.
    // e.g.
    // t1 = op t2, c1
    // t3 = op t4, c2
    //
    // and the following instructions are both ready.
    // t2 = op c3
    // t4 = op c4
    //
    // Then schedule t2 = op first.
    // i.e.
    // t4 = op c4
    // t2 = op c3
    // t1 = op t2, c1
    // t3 = op t4, c2
    //
    // This creates more short live intervals.
    unsigned LDist = closestSucc(left);
    unsigned RDist = closestSucc(right);
    if (LDist < RDist)
      return true;
    else if (LDist == RDist) {
      // Intuitively, it's good to push down instructions whose results are
      // liveout so their long live ranges won't conflict with other values
      // which are needed inside the BB. Further prioritize liveout instructions
      // by the number of operands which are calculated within the BB.
      unsigned LScratch = calcMaxScratches(left);
      unsigned RScratch = calcMaxScratches(right);
      // Remaining tie breakers, in order: scratch-register estimate, height,
      // depth, and finally cycle bound.  Each nested else binds to the
      // nearest if, forming a last-resort comparison cascade.
      if (LScratch > RScratch)
        return true;
      else if (LScratch == RScratch)
        if (left->Height > right->Height)
          return true;
        else if (left->Height == right->Height)
          if (left->Depth < right->Depth)
            return true;
          else if (left->Depth == right->Depth)
            if (left->CycleBound > right->CycleBound)
              return true;
    }
  }
  return false;
}
1108
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001109template<class SF>
1110bool BURegReductionPriorityQueue<SF>::canClobber(SUnit *SU, SUnit *Op) {
1111 if (SU->isTwoAddress) {
1112 unsigned Opc = SU->Node->getTargetOpcode();
Evan Cheng100c8d62007-09-13 00:06:00 +00001113 unsigned NumRes = TII->getNumDefs(Opc);
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001114 unsigned NumOps = ScheduleDAG::CountOperands(SU->Node);
1115 for (unsigned i = 0; i != NumOps; ++i) {
Evan Cheng67fc1412006-12-01 21:52:58 +00001116 if (TII->getOperandConstraint(Opc, i+NumRes, TOI::TIED_TO) != -1) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001117 SDNode *DU = SU->Node->getOperand(i).Val;
Evan Cheng5924bf72007-09-25 01:54:36 +00001118 if (Op == (*SUnitMap)[DU][SU->InstanceNo])
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001119 return true;
1120 }
1121 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001122 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001123 return false;
1124}
1125
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001126
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand. Add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule).
template<class SF>
void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    // SUnits is held as const; cast away constness to add edges in place.
    SUnit *SU = (SUnit *)&((*SUnits)[i]);
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->Node;
    if (!Node || !Node->isTargetOpcode())
      continue;

    unsigned Opc = Node->getTargetOpcode();
    unsigned NumRes = TII->getNumDefs(Opc);
    unsigned NumOps = ScheduleDAG::CountOperands(Node);
    for (unsigned j = 0; j != NumOps; ++j) {
      // Only operands tied to a def (def&use) are of interest.
      if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->Node->getOperand(j).Val;
        SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
        if (!DUSU) continue;
        // Consider every other user of the shared value.
        for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
             I != E; ++I) {
          if (I->isCtrl) continue;
          SUnit *SuccSU = I->Dep;
          // Don't constraint nodes with implicit defs. It can create cycles
          // plus it may increase register pressures.
          if (SuccSU == SU || SuccSU->hasImplicitDefs)
            continue;
          // Be conservative. Ignore if nodes aren't at the same depth.
          if (SuccSU->Depth != SU->Depth)
            continue;
          // Add the edge only if the other user doesn't itself clobber the
          // value (or SU is the less flexible, non-commutable one), and
          // only when it cannot introduce a cycle in the DAG.
          if ((!canClobber(SuccSU, DUSU) ||
               (!SU->isCommutable && SuccSU->isCommutable)) &&
              !isReachable(SuccSU, SU)) {
            DOUT << "Adding an edge from SU # " << SU->NodeNum
                 << " to SU #" << SuccSU->NodeNum << "\n";
            // Pseudo control edge: SU must schedule before SuccSU
            // (bottom-up: SU ends up lower in the final schedule).
            SU->addPred(SuccSU, true, true);
          }
        }
      }
    }
  }
}
1173
/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
/// Smaller number is the higher priority.
template<class SF>
unsigned BURegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  // Memoized through the SethiUllmanNumbers table; zero means "not yet
  // computed".
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  // Classic Sethi-Ullman recurrence: take the max over data predecessors,
  // adding one (via Extra) for each additional predecessor that ties the max.
  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    SUnit *PredSU = I->Dep;
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    // NOTE(review): the !I->isCtrl here looks redundant — ctrl preds were
    // already skipped by the continue above; confirm before simplifying.
    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  // Floor at 1 so the memoization sentinel (0) stays unambiguous.
  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
1203
Evan Cheng6730f032007-01-08 23:55:53 +00001204/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1205/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001206template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001207void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001208 SethiUllmanNumbers.assign(SUnits->size(), 0);
1209
1210 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001211 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001212}
1213
1214static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
1215 unsigned Sum = 0;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001216 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1217 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001218 SUnit *SuccSU = I->Dep;
Chris Lattnerd86418a2006-08-17 00:09:56 +00001219 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1220 EE = SuccSU->Preds.end(); II != EE; ++II) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001221 SUnit *PredSU = II->Dep;
Evan Chengd38c22b2006-05-11 23:55:42 +00001222 if (!PredSU->isScheduled)
Evan Cheng5924bf72007-09-25 01:54:36 +00001223 ++Sum;
Evan Chengd38c22b2006-05-11 23:55:42 +00001224 }
1225 }
1226
1227 return Sum;
1228}
1229
1230
// Top down
// Returns true when 'left' should be ordered BELOW 'right' in the
// std::priority_queue, i.e. 'right' pops (schedules) first.
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->Node && left->Node->isTargetOpcode();
  bool RIsTarget = right->Node && right->Node->isTargetOpcode();
  // "Floaters" are target nodes with no predecessors at all.
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  // Bonus for nodes that are the last unscheduled pred of their successors.
  unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
  unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;

  // Nodes with no successors sort ahead of (pop before) nodes with some.
  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  // Special tie breaker: if two nodes share a operand, the one that use it
  // as a def&use operand is preferred.
  if (LIsTarget && RIsTarget) {
    if (left->isTwoAddress && !right->isTwoAddress) {
      SDNode *DUNode = left->Node->getOperand(0).Val;
      if (DUNode->isOperand(right->Node))
        RBonus += 2;
    }
    if (!left->isTwoAddress && right->isTwoAddress) {
      SDNode *DUNode = right->Node->getOperand(0).Val;
      if (DUNode->isOperand(left->Node))
        LBonus += 2;
    }
  }
  // Penalize floaters (defer them); reward nodes with a single successor.
  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  // NOTE(review): the first comparison includes the bonuses but the equality
  // test below does not (LPriority == RPriority, without LBonus/RBonus) —
  // looks asymmetric; confirm whether this is intentional before changing.
  if (LPriority+LBonus < RPriority+RBonus)
    return true;
  else if (LPriority == RPriority)
    // Tie breakers: depth, then successors left, then cycle bound.  Each
    // nested else binds to the nearest if, forming a comparison cascade.
    if (left->Depth < right->Depth)
      return true;
    else if (left->Depth == right->Depth)
      if (left->NumSuccsLeft > right->NumSuccsLeft)
        return true;
      else if (left->NumSuccsLeft == right->NumSuccsLeft)
        if (left->CycleBound > right->CycleBound)
          return true;
  return false;
}
1283
/// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
/// Smaller number is the higher priority.
template<class SF>
unsigned TDRegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  // Memoized through the SethiUllmanNumbers table; zero means "not yet
  // computed" (a computed 0 below will therefore be recomputed on reuse —
  // harmless but worth knowing).
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // Defer token factors and copies-to-reg toward their uses.
    SethiUllmanNumber = 0xffff;
  else if (SU->NumSuccsLeft == 0)
    // If SU does not have a use, i.e. it doesn't produce a value that would
    // be consumed (e.g. store), then it terminates a chain of computation.
    // Give it a small SethiUllman number so it will be scheduled right before
    // its predecessors that it doesn't lengthen their live ranges.
    SethiUllmanNumber = 0;
  else if (SU->NumPredsLeft == 0 &&
           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
    SethiUllmanNumber = 0xffff;
  else {
    // Classic Sethi-Ullman recurrence over data predecessors: max plus one
    // (via Extra) per additional predecessor tying the max.
    int Extra = 0;
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl) continue;  // ignore chain preds
      SUnit *PredSU = I->Dep;
      unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
      if (PredSethiUllman > SethiUllmanNumber) {
        SethiUllmanNumber = PredSethiUllman;
        Extra = 0;
      // NOTE(review): !I->isCtrl looks redundant here — ctrl preds were
      // already skipped by the continue above; confirm before simplifying.
      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
        ++Extra;
    }

    SethiUllmanNumber += Extra;
  }

  return SethiUllmanNumber;
}
1324
Evan Cheng6730f032007-01-08 23:55:53 +00001325/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1326/// scheduling units.
Evan Chengd38c22b2006-05-11 23:55:42 +00001327template<class SF>
Evan Cheng6730f032007-01-08 23:55:53 +00001328void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001329 SethiUllmanNumbers.assign(SUnits->size(), 0);
1330
1331 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng6730f032007-01-08 23:55:53 +00001332 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001333}
1334
1335//===----------------------------------------------------------------------===//
1336// Public Constructor Functions
1337//===----------------------------------------------------------------------===//
1338
Jim Laskey03593f72006-08-01 18:29:48 +00001339llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1340 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001341 MachineBasicBlock *BB) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001342 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
Jim Laskey95eda5b2006-08-01 14:21:23 +00001343 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001344 new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII));
Evan Chengd38c22b2006-05-11 23:55:42 +00001345}
1346
Jim Laskey03593f72006-08-01 18:29:48 +00001347llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1348 SelectionDAG *DAG,
Evan Chengd38c22b2006-05-11 23:55:42 +00001349 MachineBasicBlock *BB) {
Jim Laskey95eda5b2006-08-01 14:21:23 +00001350 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
Chris Lattner296a83c2007-02-01 04:55:59 +00001351 new TDRegReductionPriorityQueue<td_ls_rr_sort>());
Evan Chengd38c22b2006-05-11 23:55:42 +00001352}
1353