//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
17
Dale Johannesen2182f062007-07-13 17:13:54 +000018#define DEBUG_TYPE "pre-RA-sched"
Evan Chengd38c22b2006-05-11 23:55:42 +000019#include "llvm/CodeGen/ScheduleDAG.h"
Jim Laskey29e635d2006-08-02 12:30:23 +000020#include "llvm/CodeGen/SchedulerRegistry.h"
Dan Gohman3a4be0f2008-02-10 18:45:23 +000021#include "llvm/Target/TargetRegisterInfo.h"
Owen Anderson8c2c1e92006-05-12 06:33:49 +000022#include "llvm/Target/TargetData.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000023#include "llvm/Target/TargetMachine.h"
24#include "llvm/Target/TargetInstrInfo.h"
25#include "llvm/Support/Debug.h"
Chris Lattner3d27be12006-08-27 12:54:02 +000026#include "llvm/Support/Compiler.h"
Dan Gohmana4db3352008-06-21 18:35:25 +000027#include "llvm/ADT/BitVector.h"
28#include "llvm/ADT/PriorityQueue.h"
Evan Chenge6f92252007-09-27 18:46:06 +000029#include "llvm/ADT/SmallPtrSet.h"
Evan Cheng5924bf72007-09-25 01:54:36 +000030#include "llvm/ADT/SmallSet.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000031#include "llvm/ADT/Statistic.h"
Roman Levenstein6b371142008-04-29 09:07:59 +000032#include "llvm/ADT/STLExtras.h"
Evan Chengd38c22b2006-05-11 23:55:42 +000033#include <climits>
Evan Chengd38c22b2006-05-11 23:55:42 +000034#include "llvm/Support/CommandLine.h"
35using namespace llvm;
36
// Statistics reported by -stats (counted only in +Asserts builds).
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumCCCopies, "Number of cross class copies");

// Register the two register-reduction schedulers with the scheduler registry
// so they can be selected on the command line (-pre-RA-sched=...).
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       " Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       " Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
50
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// Fast - True if we are performing fast scheduling.
  ///
  bool Fast;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  /// Owned by this scheduler and deleted in the destructor.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their definition
  /// that are "live". These nodes must be scheduled before any other nodes that
  /// modifies the registers can be scheduled.  Indexed by physical register
  /// number; LiveRegCycles records the cycle in which the register became live.
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup, bool f,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
      AvailableQueue(availqueue) {
    }

  ~ScheduleDAGRRList() {
    // The scheduler takes ownership of the priority queue passed in.
    delete AvailableQueue;
  }

  /// Schedule - Schedule the DAG using list scheduling.
  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

  /// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);

  /// AddPred - This adds the specified node X as a predecessor of
  /// the current node Y if not already.
  /// This returns true if this is a new predecessor.
  /// Updates the topological ordering if required.
  bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
               unsigned PhyReg = 0, int Cost = 1);

  /// RemovePred - This removes the specified node N from the predecessors of
  /// the current node M. Updates the topological ordering if required.
  bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);

private:
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*,
                                  SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();


  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.  A full re-sort is only needed when the
    // new node falls outside the range the index arrays currently cover.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    SUnit *NewNode = Clone(N);
    // Update the topological ordering (same rationale as CreateNewSUnit).
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// Functions for preserving the topological ordering
  /// even after dynamic insertions of new edges.
  /// This allows a very fast implementation of IsReachable.

  /// InitDAGTopologicalSorting - create the initial topological
  /// ordering from the DAG to be scheduled.
  void InitDAGTopologicalSorting();

  /// DFS - make a DFS traversal and mark all nodes affected by the
  /// edge insertion. These nodes will later get new topological indexes
  /// by means of the Shift method.
  void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

  /// Shift - reassign topological indexes for the nodes in the DAG
  /// to preserve the topological ordering.
  void Shift(BitVector& Visited, int LowerBound, int UpperBound);

  /// Allocate - assign the topological index to the node n.
  void Allocate(int n, int index);

  /// Index2Node - Maps topological index to the node number.
  std::vector<int> Index2Node;
  /// Node2Index - Maps the node number to its topological index.
  std::vector<int> Node2Index;
  /// Visited - a set of nodes visited during a DFS traversal.
  BitVector Visited;
};
}  // end anonymous namespace
175
176
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // One slot per physical register for live-reg tracking.
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  // Depth/height information is only used by the non-fast path; skip the
  // extra passes when fast scheduling is requested.
  if (!Fast) {
    CalculateDepths();
    CalculateHeights();
  }
  InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  // The commute pass is a pressure-reduction refinement; skipped in fast mode.
  if (!Fast)
    CommuteNodesToReducePressure();
}
208
/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to a MI.
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  // OperandSeen holds nodes whose value is used by an instruction scheduled
  // *later* in the sequence (we walk the sequence bottom-up).
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size(); i != 0; ) {
    --i;
    SUnit *SU = Sequence[i];
    if (!SU || !SU->Node) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->Node->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      unsigned NumRes = TID.getNumDefs();
      unsigned NumOps = TID.getNumOperands() - NumRes;
      for (unsigned j = 0; j != NumOps; ++j) {
        // Only operands tied to a result (two-address constraints) matter.
        if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->Node->getOperand(j).getNode();
        SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU. Try to commute SU if no other source operands
          // are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->Node->getOperand(k).getNode();
              OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->Node);
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    // Record this node's non-control predecessors as "seen" so earlier
    // instructions know their values are still live below.
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep->OrigNode);
    }
  }
}
Evan Chengd38c22b2006-05-11 23:55:42 +0000260
261//===----------------------------------------------------------------------===//
262// Bottom-Up Scheduling
263//===----------------------------------------------------------------------===//
264
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  // Guard against releasing a predecessor more times than it has successors;
  // that would indicate a broken DAG or bookkeeping bug.
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // All successors are scheduled; this node is now available for scheduling.
  if (PredSU->NumSuccsLeft == 0) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}
291
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      // Only clear the entry if this node's def is the one that made the
      // register live (i.e. the recorded cycle matches the successor's use).
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}
334
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  // Recompute PredSU's cycle bound from its remaining scheduled successors,
  // skipping the successor (SU) that is being unscheduled.
  unsigned CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    CycleBound = std::max(CycleBound,
                          I->Dep->Cycle + PredSU->Latency);
  }

  // PredSU regains an unscheduled successor, so it is no longer available.
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  PredSU->CycleBound = CycleBound;
  ++PredSU->NumSuccsLeft;
}
357
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  // Re-capture each predecessor, undoing what ScheduleNodeBottomUp's
  // ReleasePred calls did, and roll back live-register bookkeeping.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  // Physical register defs of this node become live again since their
  // consumers remain scheduled below.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  // Mark the node unscheduled and make it available again.
  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
396
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000397/// IsReachable - Checks if SU is reachable from TargetSU.
Dan Gohmane955c482008-08-05 14:45:15 +0000398bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
Roman Levenstein733a4d62008-03-26 11:23:38 +0000399 // If insertion of the edge SU->TargetSU would create a cycle
400 // then there is a path from TargetSU to SU.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000401 int UpperBound, LowerBound;
402 LowerBound = Node2Index[TargetSU->NodeNum];
403 UpperBound = Node2Index[SU->NodeNum];
404 bool HasLoop = false;
405 // Is Ord(TargetSU) < Ord(SU) ?
406 if (LowerBound < UpperBound) {
407 Visited.reset();
408 // There may be a path from TargetSU to SU. Check for it.
409 DFS(TargetSU, UpperBound, HasLoop);
Evan Chengcfd5f822007-09-27 00:25:29 +0000410 }
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000411 return HasLoop;
Evan Chengcfd5f822007-09-27 00:25:29 +0000412}
413
Roman Levenstein733a4d62008-03-26 11:23:38 +0000414/// Allocate - assign the topological index to the node n.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000415inline void ScheduleDAGRRList::Allocate(int n, int index) {
416 Node2Index[n] = index;
417 Index2Node[index] = n;
Evan Chengcfd5f822007-09-27 00:25:29 +0000418}
419
Roman Levenstein733a4d62008-03-26 11:23:38 +0000420/// InitDAGTopologicalSorting - create the initial topological
421/// ordering from the DAG to be scheduled.
Evan Cheng2c977312008-07-01 18:05:03 +0000422
423/// The idea of the algorithm is taken from
424/// "Online algorithms for managing the topological order of
425/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
426/// This is the MNR algorithm, which was first introduced by
427/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
428/// "Maintaining a topological order under edge insertions".
429///
430/// Short description of the algorithm:
431///
432/// Topological ordering, ord, of a DAG maps each node to a topological
433/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
434///
435/// This means that if there is a path from the node X to the node Z,
436/// then ord(X) < ord(Z).
437///
438/// This property can be used to check for reachability of nodes:
439/// if Z is reachable from X, then an insertion of the edge Z->X would
440/// create a cycle.
441///
442/// The algorithm first computes a topological ordering for the DAG by
443/// initializing the Index2Node and Node2Index arrays and then tries to keep
444/// the ordering up-to-date after edge insertions by reordering the DAG.
445///
446/// On insertion of the edge X->Y, the algorithm first marks by calling DFS
447/// the nodes reachable from Y, and then shifts them using Shift to lie
448/// immediately after X in Index2Node.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000449void ScheduleDAGRRList::InitDAGTopologicalSorting() {
450 unsigned DAGSize = SUnits.size();
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000451 std::vector<SUnit*> WorkList;
452 WorkList.reserve(DAGSize);
Dan Gohman3a3a52d2008-08-27 16:29:48 +0000453
454 Index2Node.resize(DAGSize);
455 Node2Index.resize(DAGSize);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000456
Roman Levenstein733a4d62008-03-26 11:23:38 +0000457 // Initialize the data structures.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000458 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
459 SUnit *SU = &SUnits[i];
460 int NodeNum = SU->NodeNum;
461 unsigned Degree = SU->Succs.size();
Dan Gohman3a3a52d2008-08-27 16:29:48 +0000462 // Temporarily use the Node2Index array as scratch space for degree counts.
463 Node2Index[NodeNum] = Degree;
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000464
465 // Is it a node without dependencies?
466 if (Degree == 0) {
467 assert(SU->Succs.empty() && "SUnit should have no successors");
Roman Levenstein733a4d62008-03-26 11:23:38 +0000468 // Collect leaf nodes.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000469 WorkList.push_back(SU);
470 }
471 }
472
Dan Gohman3a3a52d2008-08-27 16:29:48 +0000473 int Id = DAGSize;
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000474 while (!WorkList.empty()) {
475 SUnit *SU = WorkList.back();
476 WorkList.pop_back();
Dan Gohman3a3a52d2008-08-27 16:29:48 +0000477 Allocate(SU->NodeNum, --Id);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000478 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
479 I != E; ++I) {
480 SUnit *SU = I->Dep;
Dan Gohman3a3a52d2008-08-27 16:29:48 +0000481 if (!--Node2Index[SU->NodeNum])
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000482 // If all dependencies of the node are processed already,
Roman Levenstein733a4d62008-03-26 11:23:38 +0000483 // then the node can be computed now.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000484 WorkList.push_back(SU);
485 }
486 }
487
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000488 Visited.resize(DAGSize);
489
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000490#ifndef NDEBUG
491 // Check correctness of the ordering
492 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
493 SUnit *SU = &SUnits[i];
494 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
495 I != E; ++I) {
496 assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
497 "Wrong topological sorting");
498 }
499 }
500#endif
501}
502
/// AddPred - adds an edge from SUnit X to SUnit Y.
/// Updates the topological ordering if required.
/// Returns true if this is a new predecessor (per SUnit::addPred).
bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
                 unsigned PhyReg, int Cost) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(X) < Ord(Y) ?  Only then can the new edge X->Y violate the
  // current ordering and force a reindex of the affected region.
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
  // Now really insert the edge.
  return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
}
523
Roman Levenstein733a4d62008-03-26 11:23:38 +0000524/// RemovePred - This removes the specified node N from the predecessors of
525/// the current node M. Updates the topological ordering if required.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000526bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
527 bool isCtrl, bool isSpecial) {
528 // InitDAGTopologicalSorting();
529 return M->removePred(N, isCtrl, isSpecial);
530}
531
/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
/// Sets HasLoop if a node with topological index UpperBound is reached
/// (meaning the pending edge insertion would create a cycle).
void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
  // Iterative DFS using an explicit work list; SU is reused as the cursor.
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  while (!WorkList.empty()) {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      int s = SU->Succs[I].Dep->NodeNum;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SU->Succs[I].Dep);
      }
    }
  }
}
557
/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.  Nodes marked in Visited (those reachable from the new edge's
/// head) are compacted to the top of the [LowerBound, UpperBound] window;
/// unmarked nodes slide down to fill the gaps.
void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
                              int UpperBound) {
  std::vector<int> L;   // Visited nodes, in ascending index order.
  int shift = 0;        // How many visited nodes have been pulled out so far.
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      // Unvisited node: move down by the number of slots freed so far.
      Allocate(w, i - shift);
    }
  }

  // Place the visited nodes, preserving their relative order, into the
  // slots at the end of the window (i continues past UpperBound).
  for (unsigned j = 0; j < L.size(); ++j) {
    Allocate(L[j], i - shift);
    i = i + 1;
  }
}
584
585
Dan Gohmanfd227e92008-03-25 17:10:29 +0000586/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
Evan Chengcfd5f822007-09-27 00:25:29 +0000587/// create a cycle.
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000588bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
589 if (IsReachable(TargetSU, SU))
Evan Chengcfd5f822007-09-27 00:25:29 +0000590 return true;
591 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
592 I != E; ++I)
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000593 if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
Evan Chengcfd5f822007-09-27 00:25:29 +0000594 return true;
595 return false;
596}
597
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BTCycle in order to schedule a specific node. Returns the last unscheduled
/// SUnit. Also returns if a successor is unscheduled in the process.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  // NOTE(review): if CurCycle <= BtCycle on entry, OldSU stays NULL and is
  // passed to SU->isSucc below — presumably callers guarantee
  // CurCycle > BtCycle; confirm.
  SUnit *OldSU = NULL;
  // Pop scheduled nodes off the sequence until we are back at BtCycle.
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }


  // SU must not be a successor of the last node we unscheduled, otherwise
  // the backtrack did not go far enough.
  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  ++NumBacktracks;
}
622
Evan Cheng5924bf72007-09-25 01:54:36 +0000623/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
624/// successors to the newly created node.
625SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
Evan Cheng79e97132007-10-05 01:39:18 +0000626 if (SU->FlaggedNodes.size())
627 return NULL;
Evan Cheng8e136a92007-09-26 21:36:17 +0000628
Evan Cheng79e97132007-10-05 01:39:18 +0000629 SDNode *N = SU->Node;
630 if (!N)
631 return NULL;
632
633 SUnit *NewSU;
Evan Cheng79e97132007-10-05 01:39:18 +0000634 bool TryUnfold = false;
Evan Cheng84d0ebc2007-10-05 01:42:35 +0000635 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
Duncan Sands13237ac2008-06-06 12:08:01 +0000636 MVT VT = N->getValueType(i);
Evan Cheng84d0ebc2007-10-05 01:42:35 +0000637 if (VT == MVT::Flag)
638 return NULL;
639 else if (VT == MVT::Other)
640 TryUnfold = true;
641 }
Evan Cheng79e97132007-10-05 01:39:18 +0000642 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
Dan Gohman2ce6f2a2008-07-27 21:46:04 +0000643 const SDValue &Op = N->getOperand(i);
Gabor Greiff304a7a2008-08-28 21:40:38 +0000644 MVT VT = Op.getNode()->getValueType(Op.getResNo());
Evan Cheng79e97132007-10-05 01:39:18 +0000645 if (VT == MVT::Flag)
646 return NULL;
Evan Cheng79e97132007-10-05 01:39:18 +0000647 }
648
649 if (TryUnfold) {
Dan Gohmane6e13482008-06-21 15:52:51 +0000650 SmallVector<SDNode*, 2> NewNodes;
Owen Anderson0ec92e92008-01-07 01:35:56 +0000651 if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
Evan Cheng79e97132007-10-05 01:39:18 +0000652 return NULL;
653
654 DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
655 assert(NewNodes.size() == 2 && "Expected a load folding node!");
656
657 N = NewNodes[1];
658 SDNode *LoadNode = NewNodes[0];
Evan Cheng79e97132007-10-05 01:39:18 +0000659 unsigned NumVals = N->getNumValues();
660 unsigned OldNumVals = SU->Node->getNumValues();
661 for (unsigned i = 0; i != NumVals; ++i)
Dan Gohman2ce6f2a2008-07-27 21:46:04 +0000662 DAG.ReplaceAllUsesOfValueWith(SDValue(SU->Node, i), SDValue(N, i));
663 DAG.ReplaceAllUsesOfValueWith(SDValue(SU->Node, OldNumVals-1),
664 SDValue(LoadNode, 1));
Evan Cheng79e97132007-10-05 01:39:18 +0000665
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000666 SUnit *NewSU = CreateNewSUnit(N);
Dan Gohman46520a22008-06-21 19:18:17 +0000667 assert(N->getNodeId() == -1 && "Node already inserted!");
668 N->setNodeId(NewSU->NodeNum);
Dan Gohmane6e13482008-06-21 15:52:51 +0000669
Dan Gohman17059682008-07-17 19:10:17 +0000670 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
Dan Gohman856c0122008-02-16 00:25:40 +0000671 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
Chris Lattnerfd2e3382008-01-07 06:47:00 +0000672 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
Evan Cheng79e97132007-10-05 01:39:18 +0000673 NewSU->isTwoAddress = true;
674 break;
675 }
676 }
Chris Lattnerfd2e3382008-01-07 06:47:00 +0000677 if (TID.isCommutable())
Evan Cheng79e97132007-10-05 01:39:18 +0000678 NewSU->isCommutable = true;
Evan Cheng79e97132007-10-05 01:39:18 +0000679 // FIXME: Calculate height / depth and propagate the changes?
Evan Cheng91e0fc92007-12-18 08:42:10 +0000680 NewSU->Depth = SU->Depth;
681 NewSU->Height = SU->Height;
Evan Cheng79e97132007-10-05 01:39:18 +0000682 ComputeLatency(NewSU);
683
Evan Cheng91e0fc92007-12-18 08:42:10 +0000684 // LoadNode may already exist. This can happen when there is another
685 // load from the same location and producing the same type of value
686 // but it has different alignment or volatileness.
687 bool isNewLoad = true;
688 SUnit *LoadSU;
Dan Gohman46520a22008-06-21 19:18:17 +0000689 if (LoadNode->getNodeId() != -1) {
690 LoadSU = &SUnits[LoadNode->getNodeId()];
Evan Cheng91e0fc92007-12-18 08:42:10 +0000691 isNewLoad = false;
692 } else {
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000693 LoadSU = CreateNewSUnit(LoadNode);
Dan Gohman46520a22008-06-21 19:18:17 +0000694 LoadNode->setNodeId(LoadSU->NodeNum);
Evan Cheng91e0fc92007-12-18 08:42:10 +0000695
696 LoadSU->Depth = SU->Depth;
697 LoadSU->Height = SU->Height;
698 ComputeLatency(LoadSU);
699 }
700
Evan Cheng79e97132007-10-05 01:39:18 +0000701 SUnit *ChainPred = NULL;
702 SmallVector<SDep, 4> ChainSuccs;
703 SmallVector<SDep, 4> LoadPreds;
704 SmallVector<SDep, 4> NodePreds;
705 SmallVector<SDep, 4> NodeSuccs;
706 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
707 I != E; ++I) {
708 if (I->isCtrl)
709 ChainPred = I->Dep;
Evan Cheng567d2e52008-03-04 00:41:45 +0000710 else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
Evan Cheng79e97132007-10-05 01:39:18 +0000711 LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
712 else
713 NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
714 }
715 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
716 I != E; ++I) {
717 if (I->isCtrl)
718 ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
719 I->isCtrl, I->isSpecial));
720 else
721 NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
722 I->isCtrl, I->isSpecial));
723 }
724
Dan Gohman4370f262008-04-15 01:22:18 +0000725 if (ChainPred) {
726 RemovePred(SU, ChainPred, true, false);
727 if (isNewLoad)
728 AddPred(LoadSU, ChainPred, true, false);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000729 }
Evan Cheng79e97132007-10-05 01:39:18 +0000730 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
731 SDep *Pred = &LoadPreds[i];
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000732 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
733 if (isNewLoad) {
734 AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
Roman Levenstein733a4d62008-03-26 11:23:38 +0000735 Pred->Reg, Pred->Cost);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000736 }
Evan Cheng79e97132007-10-05 01:39:18 +0000737 }
738 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
739 SDep *Pred = &NodePreds[i];
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000740 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
741 AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
Roman Levenstein733a4d62008-03-26 11:23:38 +0000742 Pred->Reg, Pred->Cost);
Evan Cheng79e97132007-10-05 01:39:18 +0000743 }
744 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
745 SDep *Succ = &NodeSuccs[i];
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000746 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
747 AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
Roman Levenstein733a4d62008-03-26 11:23:38 +0000748 Succ->Reg, Succ->Cost);
Evan Cheng79e97132007-10-05 01:39:18 +0000749 }
750 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
751 SDep *Succ = &ChainSuccs[i];
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000752 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
753 if (isNewLoad) {
754 AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
Roman Levenstein733a4d62008-03-26 11:23:38 +0000755 Succ->Reg, Succ->Cost);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000756 }
Evan Cheng79e97132007-10-05 01:39:18 +0000757 }
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000758 if (isNewLoad) {
759 AddPred(NewSU, LoadSU, false, false);
760 }
Evan Cheng79e97132007-10-05 01:39:18 +0000761
Evan Cheng91e0fc92007-12-18 08:42:10 +0000762 if (isNewLoad)
763 AvailableQueue->addNode(LoadSU);
Evan Cheng79e97132007-10-05 01:39:18 +0000764 AvailableQueue->addNode(NewSU);
765
766 ++NumUnfolds;
767
768 if (NewSU->NumSuccsLeft == 0) {
769 NewSU->isAvailable = true;
770 return NewSU;
Evan Cheng91e0fc92007-12-18 08:42:10 +0000771 }
772 SU = NewSU;
Evan Cheng79e97132007-10-05 01:39:18 +0000773 }
774
775 DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000776 NewSU = CreateClone(SU);
Evan Cheng5924bf72007-09-25 01:54:36 +0000777
778 // New SUnit has the exact same predecessors.
779 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
780 I != E; ++I)
781 if (!I->isSpecial) {
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000782 AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
Evan Cheng5924bf72007-09-25 01:54:36 +0000783 NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
784 }
785
786 // Only copy scheduled successors. Cut them from old node's successor
787 // list and move them over.
Evan Chengbde499b2007-09-27 07:29:27 +0000788 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
Evan Cheng5924bf72007-09-25 01:54:36 +0000789 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
790 I != E; ++I) {
791 if (I->isSpecial)
792 continue;
Evan Cheng5924bf72007-09-25 01:54:36 +0000793 if (I->Dep->isScheduled) {
Evan Chengbde499b2007-09-27 07:29:27 +0000794 NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000795 AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
Evan Chengbde499b2007-09-27 07:29:27 +0000796 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
Evan Cheng5924bf72007-09-25 01:54:36 +0000797 }
798 }
799 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
Evan Chengbde499b2007-09-27 07:29:27 +0000800 SUnit *Succ = DelDeps[i].first;
801 bool isCtrl = DelDeps[i].second;
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000802 RemovePred(Succ, SU, isCtrl, false);
Evan Cheng5924bf72007-09-25 01:54:36 +0000803 }
804
805 AvailableQueue->updateNode(SU);
806 AvailableQueue->addNode(NewSU);
807
Evan Cheng1ec79b42007-09-27 07:09:03 +0000808 ++NumDups;
Evan Cheng5924bf72007-09-25 01:54:36 +0000809 return NewSU;
810}
811
Evan Cheng1ec79b42007-09-27 07:09:03 +0000812/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
813/// and move all scheduled successors of the given SUnit to the last copy.
814void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
815 const TargetRegisterClass *DestRC,
816 const TargetRegisterClass *SrcRC,
817 SmallVector<SUnit*, 2> &Copies) {
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000818 SUnit *CopyFromSU = CreateNewSUnit(NULL);
Evan Cheng8e136a92007-09-26 21:36:17 +0000819 CopyFromSU->CopySrcRC = SrcRC;
820 CopyFromSU->CopyDstRC = DestRC;
821 CopyFromSU->Depth = SU->Depth;
822 CopyFromSU->Height = SU->Height;
823
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000824 SUnit *CopyToSU = CreateNewSUnit(NULL);
Evan Cheng8e136a92007-09-26 21:36:17 +0000825 CopyToSU->CopySrcRC = DestRC;
826 CopyToSU->CopyDstRC = SrcRC;
827
828 // Only copy scheduled successors. Cut them from old node's successor
829 // list and move them over.
Evan Chengbde499b2007-09-27 07:29:27 +0000830 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
Evan Cheng8e136a92007-09-26 21:36:17 +0000831 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
832 I != E; ++I) {
833 if (I->isSpecial)
834 continue;
Evan Cheng8e136a92007-09-26 21:36:17 +0000835 if (I->Dep->isScheduled) {
Evan Chengbde499b2007-09-27 07:29:27 +0000836 CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000837 AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
Evan Chengbde499b2007-09-27 07:29:27 +0000838 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
Evan Cheng8e136a92007-09-26 21:36:17 +0000839 }
840 }
841 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
Evan Chengbde499b2007-09-27 07:29:27 +0000842 SUnit *Succ = DelDeps[i].first;
843 bool isCtrl = DelDeps[i].second;
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000844 RemovePred(Succ, SU, isCtrl, false);
Evan Cheng8e136a92007-09-26 21:36:17 +0000845 }
846
Roman Levenstein7e71b4b2008-03-26 09:18:09 +0000847 AddPred(CopyFromSU, SU, false, false, Reg, -1);
848 AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
Evan Cheng8e136a92007-09-26 21:36:17 +0000849
850 AvailableQueue->updateNode(SU);
851 AvailableQueue->addNode(CopyFromSU);
852 AvailableQueue->addNode(CopyToSU);
Evan Cheng1ec79b42007-09-27 07:09:03 +0000853 Copies.push_back(CopyFromSU);
854 Copies.push_back(CopyToSU);
Evan Cheng8e136a92007-09-26 21:36:17 +0000855
Evan Cheng1ec79b42007-09-27 07:09:03 +0000856 ++NumCCCopies;
Evan Cheng8e136a92007-09-26 21:36:17 +0000857}
858
859/// getPhysicalRegisterVT - Returns the ValueType of the physical register
860/// definition of the specified node.
861/// FIXME: Move to SelectionDAG?
Duncan Sands13237ac2008-06-06 12:08:01 +0000862static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
863 const TargetInstrInfo *TII) {
Dan Gohman17059682008-07-17 19:10:17 +0000864 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
Evan Cheng8e136a92007-09-26 21:36:17 +0000865 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
Chris Lattnerb0d06b42008-01-07 03:13:06 +0000866 unsigned NumRes = TID.getNumDefs();
867 for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
Evan Cheng8e136a92007-09-26 21:36:17 +0000868 if (Reg == *ImpDef)
869 break;
870 ++NumRes;
871 }
872 return N->getValueType(NumRes);
873}
874
Evan Cheng5924bf72007-09-25 01:54:36 +0000875/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
876/// scheduling of the given node to satisfy live physical register dependencies.
877/// If the specific node is the last one that's available to schedule, do
878/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
Evan Cheng1ec79b42007-09-27 07:09:03 +0000879bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
880 SmallVector<unsigned, 4> &LRegs){
Evan Cheng5924bf72007-09-25 01:54:36 +0000881 if (LiveRegs.empty())
882 return false;
883
Evan Chenge6f92252007-09-27 18:46:06 +0000884 SmallSet<unsigned, 4> RegAdded;
Evan Cheng5924bf72007-09-25 01:54:36 +0000885 // If this node would clobber any "live" register, then it's not ready.
Evan Cheng5924bf72007-09-25 01:54:36 +0000886 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
887 I != E; ++I) {
888 if (I->Cost < 0) {
889 unsigned Reg = I->Reg;
Evan Chenge6f92252007-09-27 18:46:06 +0000890 if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
891 if (RegAdded.insert(Reg))
892 LRegs.push_back(Reg);
893 }
Dan Gohman3a4be0f2008-02-10 18:45:23 +0000894 for (const unsigned *Alias = TRI->getAliasSet(Reg);
Evan Cheng5924bf72007-09-25 01:54:36 +0000895 *Alias; ++Alias)
Evan Chenge6f92252007-09-27 18:46:06 +0000896 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
897 if (RegAdded.insert(*Alias))
898 LRegs.push_back(*Alias);
899 }
Evan Cheng5924bf72007-09-25 01:54:36 +0000900 }
901 }
902
903 for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
904 SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
Dan Gohman17059682008-07-17 19:10:17 +0000905 if (!Node || !Node->isMachineOpcode())
Evan Cheng5924bf72007-09-25 01:54:36 +0000906 continue;
Dan Gohman17059682008-07-17 19:10:17 +0000907 const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
Evan Cheng5924bf72007-09-25 01:54:36 +0000908 if (!TID.ImplicitDefs)
909 continue;
910 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
Evan Chenge6f92252007-09-27 18:46:06 +0000911 if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
912 if (RegAdded.insert(*Reg))
913 LRegs.push_back(*Reg);
914 }
Dan Gohman3a4be0f2008-02-10 18:45:23 +0000915 for (const unsigned *Alias = TRI->getAliasSet(*Reg);
Evan Cheng5924bf72007-09-25 01:54:36 +0000916 *Alias; ++Alias)
Evan Chenge6f92252007-09-27 18:46:06 +0000917 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
918 if (RegAdded.insert(*Alias))
919 LRegs.push_back(*Alias);
920 }
Evan Cheng5924bf72007-09-25 01:54:36 +0000921 }
922 }
Evan Cheng5924bf72007-09-25 01:54:36 +0000923 return !LRegs.empty();
Evan Chengd38c22b2006-05-11 23:55:42 +0000924}
925
Evan Cheng1ec79b42007-09-27 07:09:03 +0000926
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers. Builds Sequence in reverse (callee order) and reverses it at
/// the end. Cycles with no schedulable node are recorded as NULL (noops).
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG.getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back.  Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    // Pop candidates until one is found that is within its cycle bound and
    // free of live physical register interference; rejects are parked in
    // NotReady and re-queued below.
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle) {
        SmallVector<unsigned, 4> LRegs;
        if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
          break;
        // Remember which live registers blocked this candidate in case the
        // conflict-resolution code below needs them.
        Delayed = true;
        LRegsMap.insert(std::make_pair(CurSU, LRegs));
      }

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, OldSU, true, true);
          // If one or more successors has been unscheduled, then the current
          // node is no longer avaialable. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack. Try duplicating the nodes that produces these
        // "expensive to copy" values to break the dependency. In case even
        // that doesn't work, insert cross class copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
        if (!NewDef) {
          // Issue expensive cross register class copies.
          MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(Reg, VT);
          const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
          if (!DestRC) {
            assert(false && "Don't know how to copy this physical register!");
            abort();
          }
          SmallVector<SUnit*, 2> Copies;
          InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU # " << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          AddPred(TrySU, Copies.front(), true, true);
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU # " << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        // The new def takes over ownership of the live register; force it to
        // be scheduled after (i.e. bottom-up: before) TrySU.
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, TrySU, true, true);
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      if (!CurSU) {
        assert(false && "Unable to resolve live physical register dependencies!");
        abort();
      }
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    // No schedulable node this cycle: emit a noop (NULL) slot.
    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());
  
  
#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  unsigned Noops = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      // Nodes with neither preds nor succs are dead and legitimately skipped.
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].NumSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has successors left!\n";
      AnyNotSched = true;
    }
  }
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(!AnyNotSched);
  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif
}
1099
1100//===----------------------------------------------------------------------===//
1101// Top-Down Scheduling
1102//===----------------------------------------------------------------------===//
1103
1104/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
Dan Gohman54a187e2007-08-20 19:28:38 +00001105/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
Evan Chengd38c22b2006-05-11 23:55:42 +00001106void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
1107 unsigned CurCycle) {
1108 // FIXME: the distance between two nodes is not always == the predecessor's
1109 // latency. For example, the reader can very well read the register written
1110 // by the predecessor later than the issue cycle. It also depends on the
1111 // interrupt model (drain vs. freeze).
1112 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
1113
Evan Cheng038dcc52007-09-28 19:24:24 +00001114 --SuccSU->NumPredsLeft;
Evan Chengd38c22b2006-05-11 23:55:42 +00001115
1116#ifndef NDEBUG
Evan Cheng038dcc52007-09-28 19:24:24 +00001117 if (SuccSU->NumPredsLeft < 0) {
Bill Wendling22e978a2006-12-07 20:04:42 +00001118 cerr << "*** List scheduling failed! ***\n";
Evan Chengd38c22b2006-05-11 23:55:42 +00001119 SuccSU->dump(&DAG);
Bill Wendling22e978a2006-12-07 20:04:42 +00001120 cerr << " has been released too many times!\n";
Evan Chengd38c22b2006-05-11 23:55:42 +00001121 assert(0);
1122 }
1123#endif
1124
Evan Cheng038dcc52007-09-28 19:24:24 +00001125 if (SuccSU->NumPredsLeft == 0) {
Evan Chengd38c22b2006-05-11 23:55:42 +00001126 SuccSU->isAvailable = true;
1127 AvailableQueue->push(SuccSU);
1128 }
1129}
1130
1131
1132/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1133/// count of its successors. If a successor pending count is zero, add it to
1134/// the Available queue.
Evan Chengd12c97d2006-05-30 18:05:39 +00001135void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
Bill Wendling22e978a2006-12-07 20:04:42 +00001136 DOUT << "*** Scheduling [" << CurCycle << "]: ";
Evan Chengd38c22b2006-05-11 23:55:42 +00001137 DEBUG(SU->dump(&DAG));
1138 SU->Cycle = CurCycle;
1139
1140 AvailableQueue->ScheduledNode(SU);
Evan Chengd38c22b2006-05-11 23:55:42 +00001141
1142 // Top down: release successors
Chris Lattnerd86418a2006-08-17 00:09:56 +00001143 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1144 I != E; ++I)
Evan Cheng0effc3a2007-09-19 01:38:40 +00001145 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
Evan Chengd38c22b2006-05-11 23:55:42 +00001146 SU->isScheduled = true;
Evan Chengd38c22b2006-05-11 23:55:42 +00001147}
1148
Dan Gohman54a187e2007-08-20 19:28:38 +00001149/// ListScheduleTopDown - The main loop of list scheduling for top-down
1150/// schedulers.
Evan Chengd38c22b2006-05-11 23:55:42 +00001151void ScheduleDAGRRList::ListScheduleTopDown() {
1152 unsigned CurCycle = 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00001153
1154 // All leaves to Available queue.
1155 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1156 // It is available if it has no predecessors.
Dan Gohman4370f262008-04-15 01:22:18 +00001157 if (SUnits[i].Preds.empty()) {
Evan Chengd38c22b2006-05-11 23:55:42 +00001158 AvailableQueue->push(&SUnits[i]);
1159 SUnits[i].isAvailable = true;
1160 }
1161 }
1162
Evan Chengd38c22b2006-05-11 23:55:42 +00001163 // While Available queue is not empty, grab the node with the highest
Dan Gohman54a187e2007-08-20 19:28:38 +00001164 // priority. If it is not ready put it back. Schedule the node.
Evan Chengd38c22b2006-05-11 23:55:42 +00001165 std::vector<SUnit*> NotReady;
Dan Gohmane6e13482008-06-21 15:52:51 +00001166 Sequence.reserve(SUnits.size());
Evan Chengd38c22b2006-05-11 23:55:42 +00001167 while (!AvailableQueue->empty()) {
Evan Cheng5924bf72007-09-25 01:54:36 +00001168 SUnit *CurSU = AvailableQueue->pop();
1169 while (CurSU && CurSU->CycleBound > CurCycle) {
1170 NotReady.push_back(CurSU);
1171 CurSU = AvailableQueue->pop();
Evan Chengd38c22b2006-05-11 23:55:42 +00001172 }
1173
1174 // Add the nodes that aren't ready back onto the available list.
1175 AvailableQueue->push_all(NotReady);
1176 NotReady.clear();
1177
Evan Cheng5924bf72007-09-25 01:54:36 +00001178 if (!CurSU)
1179 Sequence.push_back(0);
1180 else {
1181 ScheduleNodeTopDown(CurSU, CurCycle);
1182 Sequence.push_back(CurSU);
1183 }
Dan Gohman4370f262008-04-15 01:22:18 +00001184 ++CurCycle;
Evan Chengd38c22b2006-05-11 23:55:42 +00001185 }
1186
1187
1188#ifndef NDEBUG
1189 // Verify that all SUnits were scheduled.
1190 bool AnyNotSched = false;
Dan Gohman4370f262008-04-15 01:22:18 +00001191 unsigned DeadNodes = 0;
Dan Gohman82b66732008-04-15 22:40:14 +00001192 unsigned Noops = 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00001193 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1194 if (!SUnits[i].isScheduled) {
Dan Gohman4370f262008-04-15 01:22:18 +00001195 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1196 ++DeadNodes;
1197 continue;
1198 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001199 if (!AnyNotSched)
Bill Wendling22e978a2006-12-07 20:04:42 +00001200 cerr << "*** List scheduling failed! ***\n";
Evan Chengd38c22b2006-05-11 23:55:42 +00001201 SUnits[i].dump(&DAG);
Bill Wendling22e978a2006-12-07 20:04:42 +00001202 cerr << "has not been scheduled!\n";
Evan Chengd38c22b2006-05-11 23:55:42 +00001203 AnyNotSched = true;
1204 }
Dan Gohman4370f262008-04-15 01:22:18 +00001205 if (SUnits[i].NumPredsLeft != 0) {
1206 if (!AnyNotSched)
1207 cerr << "*** List scheduling failed! ***\n";
1208 SUnits[i].dump(&DAG);
1209 cerr << "has predecessors left!\n";
1210 AnyNotSched = true;
1211 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001212 }
Dan Gohman82b66732008-04-15 22:40:14 +00001213 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1214 if (!Sequence[i])
1215 ++Noops;
Evan Chengd38c22b2006-05-11 23:55:42 +00001216 assert(!AnyNotSched);
Dan Gohman82b66732008-04-15 22:40:14 +00001217 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
Dan Gohman4370f262008-04-15 01:22:18 +00001218 "The number of nodes scheduled doesn't match the expected number!");
Evan Chengd38c22b2006-05-11 23:55:42 +00001219#endif
1220}
1221
1222
1223
1224//===----------------------------------------------------------------------===//
1225// RegReductionPriorityQueue Implementation
1226//===----------------------------------------------------------------------===//
1227//
1228// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1229// to reduce register pressure.
1230//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;
  
  /// Sorting functions for the Available queue.
  /// bu_ls_rr_sort - Comparator used by the bottom-up register-reduction
  /// priority queue; operator() is defined out of line.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    // Back-pointer to the owning queue so the comparator can consult it.
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
    
    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  /// bu_ls_rr_fast_sort - Faster variant of the bottom-up comparator.
  struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
    RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
    bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
      : SPQ(spq) {}
    bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}
    
    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  /// td_ls_rr_sort - Comparator for the top-down register-reduction queue.
  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
    
    bool operator()(const SUnit* left, const SUnit* right) const;
  };
} // end anonymous namespace
1261
Evan Cheng961bbd32007-01-08 23:50:38 +00001262static inline bool isCopyFromLiveIn(const SUnit *SU) {
1263 SDNode *N = SU->Node;
Evan Cheng8e136a92007-09-26 21:36:17 +00001264 return N && N->getOpcode() == ISD::CopyFromReg &&
Evan Cheng961bbd32007-01-08 23:50:38 +00001265 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1266}
1267
Evan Cheng7e4abde2008-07-02 09:23:51 +00001268/// CalcNodeBUSethiUllmanNumber - Compute Sethi Ullman number for bottom up
1269/// scheduling. Smaller number is the higher priority.
1270static unsigned
1271CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1272 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1273 if (SethiUllmanNumber != 0)
1274 return SethiUllmanNumber;
1275
1276 unsigned Extra = 0;
1277 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1278 I != E; ++I) {
1279 if (I->isCtrl) continue; // ignore chain preds
1280 SUnit *PredSU = I->Dep;
1281 unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
1282 if (PredSethiUllman > SethiUllmanNumber) {
1283 SethiUllmanNumber = PredSethiUllman;
1284 Extra = 0;
1285 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1286 ++Extra;
1287 }
1288
1289 SethiUllmanNumber += Extra;
1290
1291 if (SethiUllmanNumber == 0)
1292 SethiUllmanNumber = 1;
1293
1294 return SethiUllmanNumber;
1295}
1296
1297/// CalcNodeTDSethiUllmanNumber - Compute Sethi Ullman number for top down
1298/// scheduling. Smaller number is the higher priority.
1299static unsigned
1300CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1301 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1302 if (SethiUllmanNumber != 0)
1303 return SethiUllmanNumber;
1304
1305 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1306 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1307 SethiUllmanNumber = 0xffff;
1308 else if (SU->NumSuccsLeft == 0)
1309 // If SU does not have a use, i.e. it doesn't produce a value that would
1310 // be consumed (e.g. store), then it terminates a chain of computation.
1311 // Give it a small SethiUllman number so it will be scheduled right before
1312 // its predecessors that it doesn't lengthen their live ranges.
1313 SethiUllmanNumber = 0;
1314 else if (SU->NumPredsLeft == 0 &&
1315 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1316 SethiUllmanNumber = 0xffff;
1317 else {
1318 int Extra = 0;
1319 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1320 I != E; ++I) {
1321 if (I->isCtrl) continue; // ignore chain preds
1322 SUnit *PredSU = I->Dep;
1323 unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
1324 if (PredSethiUllman > SethiUllmanNumber) {
1325 SethiUllmanNumber = PredSethiUllman;
1326 Extra = 0;
1327 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1328 ++Extra;
1329 }
1330
1331 SethiUllmanNumber += Extra;
1332 }
1333
1334 return SethiUllmanNumber;
1335}
1336
1337
Evan Chengd38c22b2006-05-11 23:55:42 +00001338namespace {
1339 template<class SF>
Chris Lattner996795b2006-06-28 23:17:24 +00001340 class VISIBILITY_HIDDEN RegReductionPriorityQueue
1341 : public SchedulingPriorityQueue {
Dan Gohmana4db3352008-06-21 18:35:25 +00001342 PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
Roman Levenstein6b371142008-04-29 09:07:59 +00001343 unsigned currentQueueId;
Evan Chengd38c22b2006-05-11 23:55:42 +00001344
1345 public:
1346 RegReductionPriorityQueue() :
Roman Levenstein6b371142008-04-29 09:07:59 +00001347 Queue(SF(this)), currentQueueId(0) {}
Evan Chengd38c22b2006-05-11 23:55:42 +00001348
Dan Gohman46520a22008-06-21 19:18:17 +00001349 virtual void initNodes(std::vector<SUnit> &sunits) {}
Evan Cheng5924bf72007-09-25 01:54:36 +00001350
1351 virtual void addNode(const SUnit *SU) {}
1352
1353 virtual void updateNode(const SUnit *SU) {}
1354
Evan Chengd38c22b2006-05-11 23:55:42 +00001355 virtual void releaseState() {}
1356
Evan Cheng6730f032007-01-08 23:55:53 +00001357 virtual unsigned getNodePriority(const SUnit *SU) const {
Evan Chengd38c22b2006-05-11 23:55:42 +00001358 return 0;
1359 }
1360
Evan Cheng5924bf72007-09-25 01:54:36 +00001361 unsigned size() const { return Queue.size(); }
1362
Evan Chengd38c22b2006-05-11 23:55:42 +00001363 bool empty() const { return Queue.empty(); }
1364
1365 void push(SUnit *U) {
Roman Levenstein6b371142008-04-29 09:07:59 +00001366 assert(!U->NodeQueueId && "Node in the queue already");
1367 U->NodeQueueId = ++currentQueueId;
Dan Gohmana4db3352008-06-21 18:35:25 +00001368 Queue.push(U);
Evan Chengd38c22b2006-05-11 23:55:42 +00001369 }
Roman Levenstein6b371142008-04-29 09:07:59 +00001370
Evan Chengd38c22b2006-05-11 23:55:42 +00001371 void push_all(const std::vector<SUnit *> &Nodes) {
1372 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
Roman Levenstein6b371142008-04-29 09:07:59 +00001373 push(Nodes[i]);
Evan Chengd38c22b2006-05-11 23:55:42 +00001374 }
1375
1376 SUnit *pop() {
Evan Chengd12c97d2006-05-30 18:05:39 +00001377 if (empty()) return NULL;
Dan Gohmana4db3352008-06-21 18:35:25 +00001378 SUnit *V = Queue.top();
1379 Queue.pop();
Roman Levenstein6b371142008-04-29 09:07:59 +00001380 V->NodeQueueId = 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00001381 return V;
1382 }
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001383
Evan Cheng5924bf72007-09-25 01:54:36 +00001384 void remove(SUnit *SU) {
Roman Levenstein6b371142008-04-29 09:07:59 +00001385 assert(!Queue.empty() && "Queue is empty!");
Dan Gohmana4db3352008-06-21 18:35:25 +00001386 assert(SU->NodeQueueId != 0 && "Not in queue!");
1387 Queue.erase_one(SU);
Roman Levenstein6b371142008-04-29 09:07:59 +00001388 SU->NodeQueueId = 0;
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001389 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001390 };
1391
Chris Lattner996795b2006-06-28 23:17:24 +00001392 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
Dan Gohman4b49be12008-06-21 01:08:22 +00001393 : public RegReductionPriorityQueue<bu_ls_rr_sort> {
Evan Chengd38c22b2006-05-11 23:55:42 +00001394 // SUnits - The SUnits for the current graph.
Dan Gohmane955c482008-08-05 14:45:15 +00001395 std::vector<SUnit> *SUnits;
Evan Chengd38c22b2006-05-11 23:55:42 +00001396
1397 // SethiUllmanNumbers - The SethiUllman number for each node.
Evan Cheng961bbd32007-01-08 23:50:38 +00001398 std::vector<unsigned> SethiUllmanNumbers;
Evan Chengd38c22b2006-05-11 23:55:42 +00001399
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001400 const TargetInstrInfo *TII;
Dan Gohman3a4be0f2008-02-10 18:45:23 +00001401 const TargetRegisterInfo *TRI;
Roman Levenstein7e71b4b2008-03-26 09:18:09 +00001402 ScheduleDAGRRList *scheduleDAG;
Evan Cheng2c977312008-07-01 18:05:03 +00001403
Evan Chengd38c22b2006-05-11 23:55:42 +00001404 public:
Evan Chengf9891412007-12-20 09:25:31 +00001405 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
Evan Cheng7e4abde2008-07-02 09:23:51 +00001406 const TargetRegisterInfo *tri)
1407 : TII(tii), TRI(tri), scheduleDAG(NULL) {}
Evan Chengd38c22b2006-05-11 23:55:42 +00001408
Dan Gohman46520a22008-06-21 19:18:17 +00001409 void initNodes(std::vector<SUnit> &sunits) {
Evan Chengd38c22b2006-05-11 23:55:42 +00001410 SUnits = &sunits;
1411 // Add pseudo dependency edges for two-address nodes.
Evan Cheng7e4abde2008-07-02 09:23:51 +00001412 AddPseudoTwoAddrDeps();
Evan Chengd38c22b2006-05-11 23:55:42 +00001413 // Calculate node priorities.
Evan Cheng6730f032007-01-08 23:55:53 +00001414 CalculateSethiUllmanNumbers();
Evan Chengd38c22b2006-05-11 23:55:42 +00001415 }
1416
Evan Cheng5924bf72007-09-25 01:54:36 +00001417 void addNode(const SUnit *SU) {
Evan Cheng7e4abde2008-07-02 09:23:51 +00001418 unsigned SUSize = SethiUllmanNumbers.size();
1419 if (SUnits->size() > SUSize)
1420 SethiUllmanNumbers.resize(SUSize*2, 0);
1421 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
Evan Cheng5924bf72007-09-25 01:54:36 +00001422 }
1423
1424 void updateNode(const SUnit *SU) {
1425 SethiUllmanNumbers[SU->NodeNum] = 0;
Evan Cheng7e4abde2008-07-02 09:23:51 +00001426 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
Evan Cheng5924bf72007-09-25 01:54:36 +00001427 }
1428
Evan Chengd38c22b2006-05-11 23:55:42 +00001429 void releaseState() {
1430 SUnits = 0;
1431 SethiUllmanNumbers.clear();
1432 }
1433
Evan Cheng6730f032007-01-08 23:55:53 +00001434 unsigned getNodePriority(const SUnit *SU) const {
Evan Cheng961bbd32007-01-08 23:50:38 +00001435 assert(SU->NodeNum < SethiUllmanNumbers.size());
Evan Cheng8e136a92007-09-26 21:36:17 +00001436 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
Evan Cheng961bbd32007-01-08 23:50:38 +00001437 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
1438 // CopyFromReg should be close to its def because it restricts
1439 // allocation choices. But if it is a livein then perhaps we want it
1440 // closer to its uses so it can be coalesced.
1441 return 0xffff;
1442 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1443 // CopyToReg should be close to its uses to facilitate coalescing and
1444 // avoid spilling.
1445 return 0;
Evan Chengaa2d6ef2007-10-12 08:50:34 +00001446 else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
1447 Opc == TargetInstrInfo::INSERT_SUBREG)
1448 // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
1449 // facilitate coalescing.
1450 return 0;
Evan Cheng961bbd32007-01-08 23:50:38 +00001451 else if (SU->NumSuccs == 0)
1452 // If SU does not have a use, i.e. it doesn't produce a value that would
1453 // be consumed (e.g. store), then it terminates a chain of computation.
1454 // Give it a large SethiUllman number so it will be scheduled right
1455 // before its predecessors that it doesn't lengthen their live ranges.
1456 return 0xffff;
1457 else if (SU->NumPreds == 0)
1458 // If SU does not have a def, schedule it close to its uses because it
1459 // does not lengthen any live ranges.
1460 return 0;
1461 else
1462 return SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +00001463 }
1464
Roman Levenstein7e71b4b2008-03-26 09:18:09 +00001465 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1466 scheduleDAG = scheduleDag;
1467 }
1468
Evan Chengd38c22b2006-05-11 23:55:42 +00001469 private:
Evan Cheng73bdf042008-03-01 00:39:47 +00001470 bool canClobber(const SUnit *SU, const SUnit *Op);
Evan Chengd38c22b2006-05-11 23:55:42 +00001471 void AddPseudoTwoAddrDeps();
Evan Cheng6730f032007-01-08 23:55:53 +00001472 void CalculateSethiUllmanNumbers();
Evan Cheng7e4abde2008-07-02 09:23:51 +00001473 };
1474
1475
1476 class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
1477 : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
1478 // SUnits - The SUnits for the current graph.
1479 const std::vector<SUnit> *SUnits;
1480
1481 // SethiUllmanNumbers - The SethiUllman number for each node.
1482 std::vector<unsigned> SethiUllmanNumbers;
1483 public:
1484 explicit BURegReductionFastPriorityQueue() {}
1485
1486 void initNodes(std::vector<SUnit> &sunits) {
1487 SUnits = &sunits;
1488 // Calculate node priorities.
1489 CalculateSethiUllmanNumbers();
1490 }
1491
1492 void addNode(const SUnit *SU) {
1493 unsigned SUSize = SethiUllmanNumbers.size();
1494 if (SUnits->size() > SUSize)
1495 SethiUllmanNumbers.resize(SUSize*2, 0);
1496 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1497 }
1498
1499 void updateNode(const SUnit *SU) {
1500 SethiUllmanNumbers[SU->NodeNum] = 0;
1501 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1502 }
1503
1504 void releaseState() {
1505 SUnits = 0;
1506 SethiUllmanNumbers.clear();
1507 }
1508
1509 unsigned getNodePriority(const SUnit *SU) const {
1510 return SethiUllmanNumbers[SU->NodeNum];
1511 }
1512
1513 private:
1514 void CalculateSethiUllmanNumbers();
Evan Chengd38c22b2006-05-11 23:55:42 +00001515 };
1516
1517
Dan Gohman54a187e2007-08-20 19:28:38 +00001518 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
Dan Gohman4b49be12008-06-21 01:08:22 +00001519 : public RegReductionPriorityQueue<td_ls_rr_sort> {
Evan Chengd38c22b2006-05-11 23:55:42 +00001520 // SUnits - The SUnits for the current graph.
1521 const std::vector<SUnit> *SUnits;
1522
1523 // SethiUllmanNumbers - The SethiUllman number for each node.
Evan Cheng961bbd32007-01-08 23:50:38 +00001524 std::vector<unsigned> SethiUllmanNumbers;
Evan Chengd38c22b2006-05-11 23:55:42 +00001525
1526 public:
1527 TDRegReductionPriorityQueue() {}
1528
Dan Gohman46520a22008-06-21 19:18:17 +00001529 void initNodes(std::vector<SUnit> &sunits) {
Evan Chengd38c22b2006-05-11 23:55:42 +00001530 SUnits = &sunits;
1531 // Calculate node priorities.
Evan Cheng6730f032007-01-08 23:55:53 +00001532 CalculateSethiUllmanNumbers();
Evan Chengd38c22b2006-05-11 23:55:42 +00001533 }
1534
Evan Cheng5924bf72007-09-25 01:54:36 +00001535 void addNode(const SUnit *SU) {
Evan Cheng7e4abde2008-07-02 09:23:51 +00001536 unsigned SUSize = SethiUllmanNumbers.size();
1537 if (SUnits->size() > SUSize)
1538 SethiUllmanNumbers.resize(SUSize*2, 0);
1539 CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
Evan Cheng5924bf72007-09-25 01:54:36 +00001540 }
1541
1542 void updateNode(const SUnit *SU) {
1543 SethiUllmanNumbers[SU->NodeNum] = 0;
Evan Cheng7e4abde2008-07-02 09:23:51 +00001544 CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
Evan Cheng5924bf72007-09-25 01:54:36 +00001545 }
1546
Evan Chengd38c22b2006-05-11 23:55:42 +00001547 void releaseState() {
1548 SUnits = 0;
1549 SethiUllmanNumbers.clear();
1550 }
1551
Evan Cheng6730f032007-01-08 23:55:53 +00001552 unsigned getNodePriority(const SUnit *SU) const {
Evan Cheng961bbd32007-01-08 23:50:38 +00001553 assert(SU->NodeNum < SethiUllmanNumbers.size());
1554 return SethiUllmanNumbers[SU->NodeNum];
Evan Chengd38c22b2006-05-11 23:55:42 +00001555 }
1556
1557 private:
Evan Cheng6730f032007-01-08 23:55:53 +00001558 void CalculateSethiUllmanNumbers();
Evan Chengd38c22b2006-05-11 23:55:42 +00001559 };
1560}
1561
Evan Chengb9e3db62007-03-14 22:43:40 +00001562/// closestSucc - Returns the scheduled cycle of the successor which is
1563/// closet to the current cycle.
Evan Cheng28748552007-03-13 23:25:11 +00001564static unsigned closestSucc(const SUnit *SU) {
1565 unsigned MaxCycle = 0;
1566 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
Evan Chengb9e3db62007-03-14 22:43:40 +00001567 I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001568 unsigned Cycle = I->Dep->Cycle;
Evan Chengb9e3db62007-03-14 22:43:40 +00001569 // If there are bunch of CopyToRegs stacked up, they should be considered
1570 // to be at the same position.
Evan Cheng8e136a92007-09-26 21:36:17 +00001571 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
Evan Cheng0effc3a2007-09-19 01:38:40 +00001572 Cycle = closestSucc(I->Dep)+1;
Evan Chengb9e3db62007-03-14 22:43:40 +00001573 if (Cycle > MaxCycle)
1574 MaxCycle = Cycle;
1575 }
Evan Cheng28748552007-03-13 23:25:11 +00001576 return MaxCycle;
1577}
1578
Evan Cheng61bc51e2007-12-20 02:22:36 +00001579/// calcMaxScratches - Returns an cost estimate of the worse case requirement
1580/// for scratch registers. Live-in operands and live-out results don't count
1581/// since they are "fixed".
1582static unsigned calcMaxScratches(const SUnit *SU) {
1583 unsigned Scratches = 0;
1584 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1585 I != E; ++I) {
1586 if (I->isCtrl) continue; // ignore chain preds
Evan Cheng0e400d42008-01-09 23:01:55 +00001587 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
Evan Cheng61bc51e2007-12-20 02:22:36 +00001588 Scratches++;
1589 }
1590 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1591 I != E; ++I) {
1592 if (I->isCtrl) continue; // ignore chain succs
Evan Cheng0e400d42008-01-09 23:01:55 +00001593 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
Evan Cheng61bc51e2007-12-20 02:22:36 +00001594 Scratches += 10;
1595 }
1596 return Scratches;
1597}
1598
Evan Chengd38c22b2006-05-11 23:55:42 +00001599// Bottom up
1600bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
Evan Cheng6730f032007-01-08 23:55:53 +00001601 unsigned LPriority = SPQ->getNodePriority(left);
1602 unsigned RPriority = SPQ->getNodePriority(right);
Evan Cheng73bdf042008-03-01 00:39:47 +00001603 if (LPriority != RPriority)
1604 return LPriority > RPriority;
1605
1606 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
1607 // e.g.
1608 // t1 = op t2, c1
1609 // t3 = op t4, c2
1610 //
1611 // and the following instructions are both ready.
1612 // t2 = op c3
1613 // t4 = op c4
1614 //
1615 // Then schedule t2 = op first.
1616 // i.e.
1617 // t4 = op c4
1618 // t2 = op c3
1619 // t1 = op t2, c1
1620 // t3 = op t4, c2
1621 //
1622 // This creates more short live intervals.
1623 unsigned LDist = closestSucc(left);
1624 unsigned RDist = closestSucc(right);
1625 if (LDist != RDist)
1626 return LDist < RDist;
1627
1628 // Intuitively, it's good to push down instructions whose results are
1629 // liveout so their long live ranges won't conflict with other values
1630 // which are needed inside the BB. Further prioritize liveout instructions
1631 // by the number of operands which are calculated within the BB.
1632 unsigned LScratch = calcMaxScratches(left);
1633 unsigned RScratch = calcMaxScratches(right);
1634 if (LScratch != RScratch)
1635 return LScratch > RScratch;
1636
1637 if (left->Height != right->Height)
1638 return left->Height > right->Height;
1639
1640 if (left->Depth != right->Depth)
1641 return left->Depth < right->Depth;
1642
1643 if (left->CycleBound != right->CycleBound)
1644 return left->CycleBound > right->CycleBound;
1645
Roman Levenstein6b371142008-04-29 09:07:59 +00001646 assert(left->NodeQueueId && right->NodeQueueId &&
1647 "NodeQueueId cannot be zero");
1648 return (left->NodeQueueId > right->NodeQueueId);
Evan Chengd38c22b2006-05-11 23:55:42 +00001649}
1650
Dan Gohman4b49be12008-06-21 01:08:22 +00001651bool
Evan Cheng7e4abde2008-07-02 09:23:51 +00001652bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
1653 unsigned LPriority = SPQ->getNodePriority(left);
1654 unsigned RPriority = SPQ->getNodePriority(right);
1655 if (LPriority != RPriority)
1656 return LPriority > RPriority;
1657 assert(left->NodeQueueId && right->NodeQueueId &&
1658 "NodeQueueId cannot be zero");
1659 return (left->NodeQueueId > right->NodeQueueId);
1660}
1661
1662bool
Dan Gohman4b49be12008-06-21 01:08:22 +00001663BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001664 if (SU->isTwoAddress) {
Dan Gohman17059682008-07-17 19:10:17 +00001665 unsigned Opc = SU->Node->getMachineOpcode();
Chris Lattner03ad8852008-01-07 07:27:27 +00001666 const TargetInstrDesc &TID = TII->get(Opc);
Chris Lattnerfd2e3382008-01-07 06:47:00 +00001667 unsigned NumRes = TID.getNumDefs();
Dan Gohman0340d1e2008-02-15 20:50:13 +00001668 unsigned NumOps = TID.getNumOperands() - NumRes;
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001669 for (unsigned i = 0; i != NumOps; ++i) {
Chris Lattnerfd2e3382008-01-07 06:47:00 +00001670 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
Gabor Greiff304a7a2008-08-28 21:40:38 +00001671 SDNode *DU = SU->Node->getOperand(i).getNode();
Dan Gohman46520a22008-06-21 19:18:17 +00001672 if (DU->getNodeId() != -1 &&
1673 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001674 return true;
1675 }
1676 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001677 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001678 return false;
1679}
1680
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001681
Evan Chenga5e595d2007-09-28 22:32:30 +00001682/// hasCopyToRegUse - Return true if SU has a value successor that is a
1683/// CopyToReg node.
Dan Gohmane955c482008-08-05 14:45:15 +00001684static bool hasCopyToRegUse(const SUnit *SU) {
Evan Chenga5e595d2007-09-28 22:32:30 +00001685 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1686 I != E; ++I) {
1687 if (I->isCtrl) continue;
Dan Gohmane955c482008-08-05 14:45:15 +00001688 const SUnit *SuccSU = I->Dep;
Evan Chenga5e595d2007-09-28 22:32:30 +00001689 if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
1690 return true;
1691 }
1692 return false;
1693}
1694
Evan Chengf9891412007-12-20 09:25:31 +00001695/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
Dan Gohmanea045202008-06-21 22:05:24 +00001696/// physical register defs.
Dan Gohmane955c482008-08-05 14:45:15 +00001697static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
Evan Chengf9891412007-12-20 09:25:31 +00001698 const TargetInstrInfo *TII,
Dan Gohman3a4be0f2008-02-10 18:45:23 +00001699 const TargetRegisterInfo *TRI) {
Evan Chengf9891412007-12-20 09:25:31 +00001700 SDNode *N = SuccSU->Node;
Dan Gohman17059682008-07-17 19:10:17 +00001701 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1702 const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
Dan Gohmanea045202008-06-21 22:05:24 +00001703 assert(ImpDefs && "Caller should check hasPhysRegDefs");
Chris Lattnerb0d06b42008-01-07 03:13:06 +00001704 const unsigned *SUImpDefs =
Dan Gohman17059682008-07-17 19:10:17 +00001705 TII->get(SU->Node->getMachineOpcode()).getImplicitDefs();
Evan Chengf9891412007-12-20 09:25:31 +00001706 if (!SUImpDefs)
1707 return false;
1708 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
Duncan Sands13237ac2008-06-06 12:08:01 +00001709 MVT VT = N->getValueType(i);
Evan Chengf9891412007-12-20 09:25:31 +00001710 if (VT == MVT::Flag || VT == MVT::Other)
1711 continue;
Dan Gohman6ab52a82008-09-17 15:25:49 +00001712 if (!N->hasAnyUseOfValue(i))
1713 continue;
Evan Chengf9891412007-12-20 09:25:31 +00001714 unsigned Reg = ImpDefs[i - NumDefs];
1715 for (;*SUImpDefs; ++SUImpDefs) {
1716 unsigned SUReg = *SUImpDefs;
Dan Gohman3a4be0f2008-02-10 18:45:23 +00001717 if (TRI->regsOverlap(Reg, SUReg))
Evan Chengf9891412007-12-20 09:25:31 +00001718 return true;
1719 }
1720 }
1721 return false;
1722}
1723
Evan Chengd38c22b2006-05-11 23:55:42 +00001724/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1725/// it as a def&use operand. Add a pseudo control edge from it to the other
1726/// node (if it won't create a cycle) so the two-address one will be scheduled
Evan Chenga5e595d2007-09-28 22:32:30 +00001727/// first (lower in the schedule). If both nodes are two-address, favor the
1728/// one that has a CopyToReg use (more likely to be a loop induction update).
1729/// If both are two-address, but one is commutable while the other is not
1730/// commutable, favor the one that's not commutable.
Dan Gohman4b49be12008-06-21 01:08:22 +00001731void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001732 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
Dan Gohmane955c482008-08-05 14:45:15 +00001733 SUnit *SU = &(*SUnits)[i];
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001734 if (!SU->isTwoAddress)
1735 continue;
1736
1737 SDNode *Node = SU->Node;
Dan Gohman17059682008-07-17 19:10:17 +00001738 if (!Node || !Node->isMachineOpcode() || SU->FlaggedNodes.size() > 0)
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001739 continue;
1740
Dan Gohman17059682008-07-17 19:10:17 +00001741 unsigned Opc = Node->getMachineOpcode();
Chris Lattner03ad8852008-01-07 07:27:27 +00001742 const TargetInstrDesc &TID = TII->get(Opc);
Chris Lattnerfd2e3382008-01-07 06:47:00 +00001743 unsigned NumRes = TID.getNumDefs();
Dan Gohman0340d1e2008-02-15 20:50:13 +00001744 unsigned NumOps = TID.getNumOperands() - NumRes;
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001745 for (unsigned j = 0; j != NumOps; ++j) {
Chris Lattnerfd2e3382008-01-07 06:47:00 +00001746 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
Gabor Greiff304a7a2008-08-28 21:40:38 +00001747 SDNode *DU = SU->Node->getOperand(j).getNode();
Dan Gohman46520a22008-06-21 19:18:17 +00001748 if (DU->getNodeId() == -1)
Evan Cheng1bf166312007-11-09 01:27:11 +00001749 continue;
Dan Gohman46520a22008-06-21 19:18:17 +00001750 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
Evan Chengf24d15f2006-11-06 21:33:46 +00001751 if (!DUSU) continue;
Dan Gohman46520a22008-06-21 19:18:17 +00001752 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
1753 E = DUSU->Succs.end(); I != E; ++I) {
Evan Cheng0effc3a2007-09-19 01:38:40 +00001754 if (I->isCtrl) continue;
1755 SUnit *SuccSU = I->Dep;
Evan Chengf9891412007-12-20 09:25:31 +00001756 if (SuccSU == SU)
Evan Cheng5924bf72007-09-25 01:54:36 +00001757 continue;
Evan Cheng2dbffa42007-11-06 08:44:59 +00001758 // Be conservative. Ignore if nodes aren't at roughly the same
1759 // depth and height.
1760 if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
1761 continue;
Dan Gohman17059682008-07-17 19:10:17 +00001762 if (!SuccSU->Node || !SuccSU->Node->isMachineOpcode())
Evan Chengaa2d6ef2007-10-12 08:50:34 +00001763 continue;
Evan Chengf9891412007-12-20 09:25:31 +00001764 // Don't constrain nodes with physical register defs if the
Dan Gohmancf8827a2008-01-29 12:43:50 +00001765 // predecessor can clobber them.
Evan Chengf9891412007-12-20 09:25:31 +00001766 if (SuccSU->hasPhysRegDefs) {
Dan Gohman3a4be0f2008-02-10 18:45:23 +00001767 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
Evan Chengf9891412007-12-20 09:25:31 +00001768 continue;
1769 }
Evan Chengaa2d6ef2007-10-12 08:50:34 +00001770 // Don't constraint extract_subreg / insert_subreg these may be
1771 // coalesced away. We don't them close to their uses.
Dan Gohman17059682008-07-17 19:10:17 +00001772 unsigned SuccOpc = SuccSU->Node->getMachineOpcode();
Evan Chengaa2d6ef2007-10-12 08:50:34 +00001773 if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
1774 SuccOpc == TargetInstrInfo::INSERT_SUBREG)
1775 continue;
Evan Cheng5924bf72007-09-25 01:54:36 +00001776 if ((!canClobber(SuccSU, DUSU) ||
Evan Chenga5e595d2007-09-28 22:32:30 +00001777 (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
Evan Cheng5924bf72007-09-25 01:54:36 +00001778 (!SU->isCommutable && SuccSU->isCommutable)) &&
Roman Levenstein7e71b4b2008-03-26 09:18:09 +00001779 !scheduleDAG->IsReachable(SuccSU, SU)) {
Evan Cheng5924bf72007-09-25 01:54:36 +00001780 DOUT << "Adding an edge from SU # " << SU->NodeNum
1781 << " to SU #" << SuccSU->NodeNum << "\n";
Roman Levenstein7e71b4b2008-03-26 09:18:09 +00001782 scheduleDAG->AddPred(SU, SuccSU, true, true);
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001783 }
1784 }
1785 }
1786 }
1787 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001788}
1789
Evan Cheng6730f032007-01-08 23:55:53 +00001790/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1791/// scheduling units.
Dan Gohman4b49be12008-06-21 01:08:22 +00001792void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001793 SethiUllmanNumbers.assign(SUnits->size(), 0);
1794
1795 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng7e4abde2008-07-02 09:23:51 +00001796 CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1797}
1798void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
1799 SethiUllmanNumbers.assign(SUnits->size(), 0);
1800
1801 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1802 CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
Evan Chengd38c22b2006-05-11 23:55:42 +00001803}
1804
Roman Levenstein30d09512008-03-27 09:44:37 +00001805/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
Roman Levensteinbc674502008-03-27 09:14:57 +00001806/// predecessors of the successors of the SUnit SU. Stop when the provided
1807/// limit is exceeded.
Roman Levensteinbc674502008-03-27 09:14:57 +00001808static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
1809 unsigned Limit) {
1810 unsigned Sum = 0;
1811 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1812 I != E; ++I) {
Dan Gohmane955c482008-08-05 14:45:15 +00001813 const SUnit *SuccSU = I->Dep;
Roman Levensteinbc674502008-03-27 09:14:57 +00001814 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1815 EE = SuccSU->Preds.end(); II != EE; ++II) {
1816 SUnit *PredSU = II->Dep;
Evan Cheng16d72072008-03-29 18:34:22 +00001817 if (!PredSU->isScheduled)
1818 if (++Sum > Limit)
1819 return Sum;
Roman Levensteinbc674502008-03-27 09:14:57 +00001820 }
1821 }
1822 return Sum;
1823}
1824
Evan Chengd38c22b2006-05-11 23:55:42 +00001825
1826// Top down
1827bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
Evan Cheng6730f032007-01-08 23:55:53 +00001828 unsigned LPriority = SPQ->getNodePriority(left);
1829 unsigned RPriority = SPQ->getNodePriority(right);
Dan Gohman17059682008-07-17 19:10:17 +00001830 bool LIsTarget = left->Node && left->Node->isMachineOpcode();
1831 bool RIsTarget = right->Node && right->Node->isMachineOpcode();
Evan Chengd38c22b2006-05-11 23:55:42 +00001832 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1833 bool RIsFloater = RIsTarget && right->NumPreds == 0;
Roman Levensteinbc674502008-03-27 09:14:57 +00001834 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
1835 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00001836
1837 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1838 return false;
1839 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1840 return true;
1841
Evan Chengd38c22b2006-05-11 23:55:42 +00001842 if (LIsFloater)
1843 LBonus -= 2;
1844 if (RIsFloater)
1845 RBonus -= 2;
1846 if (left->NumSuccs == 1)
1847 LBonus += 2;
1848 if (right->NumSuccs == 1)
1849 RBonus += 2;
1850
Evan Cheng73bdf042008-03-01 00:39:47 +00001851 if (LPriority+LBonus != RPriority+RBonus)
1852 return LPriority+LBonus < RPriority+RBonus;
Anton Korobeynikov035eaac2008-02-20 11:10:28 +00001853
Evan Cheng73bdf042008-03-01 00:39:47 +00001854 if (left->Depth != right->Depth)
1855 return left->Depth < right->Depth;
1856
1857 if (left->NumSuccsLeft != right->NumSuccsLeft)
1858 return left->NumSuccsLeft > right->NumSuccsLeft;
1859
1860 if (left->CycleBound != right->CycleBound)
1861 return left->CycleBound > right->CycleBound;
1862
Roman Levenstein6b371142008-04-29 09:07:59 +00001863 assert(left->NodeQueueId && right->NodeQueueId &&
1864 "NodeQueueId cannot be zero");
1865 return (left->NodeQueueId > right->NodeQueueId);
Evan Chengd38c22b2006-05-11 23:55:42 +00001866}
1867
Evan Cheng6730f032007-01-08 23:55:53 +00001868/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1869/// scheduling units.
Dan Gohman4b49be12008-06-21 01:08:22 +00001870void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
Evan Chengd38c22b2006-05-11 23:55:42 +00001871 SethiUllmanNumbers.assign(SUnits->size(), 0);
1872
1873 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
Evan Cheng7e4abde2008-07-02 09:23:51 +00001874 CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
Evan Chengd38c22b2006-05-11 23:55:42 +00001875}
1876
1877//===----------------------------------------------------------------------===//
1878// Public Constructor Functions
1879//===----------------------------------------------------------------------===//
1880
Jim Laskey03593f72006-08-01 18:29:48 +00001881llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1882 SelectionDAG *DAG,
Evan Cheng2c977312008-07-01 18:05:03 +00001883 MachineBasicBlock *BB,
1884 bool Fast) {
Evan Cheng7e4abde2008-07-02 09:23:51 +00001885 if (Fast)
1886 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, true,
1887 new BURegReductionFastPriorityQueue());
1888
Evan Chengfd2c5dd2006-11-04 09:44:31 +00001889 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
Dan Gohman3a4be0f2008-02-10 18:45:23 +00001890 const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();
Roman Levenstein7e71b4b2008-03-26 09:18:09 +00001891
Evan Cheng7e4abde2008-07-02 09:23:51 +00001892 BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
Roman Levenstein7e71b4b2008-03-26 09:18:09 +00001893
Evan Cheng7e4abde2008-07-02 09:23:51 +00001894 ScheduleDAGRRList *SD =
1895 new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(),true,false, PQ);
1896 PQ->setScheduleDAG(SD);
1897 return SD;
Evan Chengd38c22b2006-05-11 23:55:42 +00001898}
1899
Jim Laskey03593f72006-08-01 18:29:48 +00001900llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1901 SelectionDAG *DAG,
Evan Cheng2c977312008-07-01 18:05:03 +00001902 MachineBasicBlock *BB,
1903 bool Fast) {
1904 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, Fast,
Evan Cheng7e4abde2008-07-02 09:23:51 +00001905 new TDRegReductionPriorityQueue());
Evan Chengd38c22b2006-05-11 23:55:42 +00001906}