//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

#ifndef NDEBUG
namespace {
  // For sched=list-ilp, count the number of times each factor comes into play.
  enum { FactPressureDiff, FactRegUses, FactHeight, FactDepth, FactUllman,
         NumFactors };
}
static const char *FactorName[NumFactors] =
{"PressureDiff", "RegUses", "Height", "Depth", "Ullman"};
static int FactorCount[NumFactors];
#endif //!NDEBUG

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation).  Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  void ScheduleNodeTopDown(SUnit*);
  void ListScheduleTopDown();


  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
}  // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");
#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }
#endif //!NDEBUG

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegGens.resize(TRI->getNumRegs(), NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
#endif // !NDEBUG
  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue,
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }
}

/// Check to see if any of the pending instructions are ready to issue.  If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue.  If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle =
      isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
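    // The node leaves PendingQueue here (it was either pushed above or is no
    // longer marked available): clear its pending flag, then remove it by
    // swapping with the last element and popping, adjusting the loop bounds.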
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      if (isBottomUp)
        HazardRec->RecedeCycle();
      else
        HazardRec->AdvanceCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (isBottomUp && SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
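  // Probe the hazard recognizer with increasing stall counts until it reports
  // no hazard for this node, then advance the current cycle by that amount.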
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (isBottomUp && SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);

  if (!isBottomUp && SU->isCall) {
    HazardRec->Reset();
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight() << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was advanced earlier.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  ++IssueCount;
  if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
      || (!HazardRec->isEnabled() && AvgIPC > 1 && IssueCount == AvgIPC)
      || AvailableQueue->empty())
    AdvanceToCycle(CurCycle + 1);
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update it
/// and its predecessors' state to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
      }
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->UnscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
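  // Replay the last LookAhead scheduled nodes into the hazard recognizer,
  // receding one cycle at a time until the recognizer's cycle matches each
  // node's height before re-emitting it.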
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling by unscheduling nodes until BtSU
/// itself is unscheduled, in order to make it possible to schedule SU.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
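  // Pop nodes off the end of the current schedule and unschedule them, until
  // BtSU has been unscheduled.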
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

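  // The node produces a chain result, so it may have a memory operand folded
  // into it. Try to unfold the load into a separate node so the load and the
  // remaining operation can be scheduled (and duplicated) independently.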
  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - If the specified register def of the specified SUnit
/// clobbers any "live" registers, record the interfering register in LRegs.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[Reg]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[Reg] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(Reg))
      LRegs.push_back(Reg);
  }
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

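      // Inline asm operands come in groups: a flag word that encodes the kind
      // and register count, followed by that many register operands.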
      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

1059 SUnit *CurSU = AvailableQueue->pop();
1060 while (CurSU) {
1061 SmallVector<unsigned, 4> LRegs;
1062 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
1063 break;
1064 LRegsMap.insert(std::make_pair(CurSU, LRegs));
1065
1066 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
1067 Interferences.push_back(CurSU);
1068 CurSU = AvailableQueue->pop();
1069 }
1070 if (CurSU) {
1071 // Add the nodes that aren't ready back onto the available list.
1072 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1073 Interferences[i]->isPending = false;
1074 assert(Interferences[i]->isAvailable && "must still be available");
1075 AvailableQueue->push(Interferences[i]);
1076 }
1077 return CurSU;
1078 }
1079
1080 // All candidates are delayed due to live physical reg dependencies.
1081 // Try backtracking, code duplication, or inserting cross class copies
1082 // to resolve it.
1083 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1084 SUnit *TrySU = Interferences[i];
1085 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1086
1087 // Try unscheduling up to the point where it's safe to schedule
1088 // this node.
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001089 SUnit *BtSU = NULL;
1090 unsigned LiveCycle = UINT_MAX;
Andrew Trick528fad92010-12-23 05:42:20 +00001091 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
1092 unsigned Reg = LRegs[j];
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001093 if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
1094 BtSU = LiveRegGens[Reg];
1095 LiveCycle = BtSU->getHeight();
1096 }
Andrew Trick528fad92010-12-23 05:42:20 +00001097 }
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001098 if (!WillCreateCycle(TrySU, BtSU)) {
1099 BacktrackBottomUp(TrySU, BtSU);
Andrew Trick528fad92010-12-23 05:42:20 +00001100
1101 // Force the current node to be scheduled before the node that
1102 // requires the physical reg dep.
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001103 if (BtSU->isAvailable) {
1104 BtSU->isAvailable = false;
1105 if (!BtSU->isPending)
1106 AvailableQueue->remove(BtSU);
Andrew Trick528fad92010-12-23 05:42:20 +00001107 }
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001108 AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
Andrew Trick528fad92010-12-23 05:42:20 +00001109 /*Reg=*/0, /*isNormalMemory=*/false,
1110 /*isMustAlias=*/false, /*isArtificial=*/true));
1111
1112 // If one or more successors have been unscheduled, then the current
1113 // node is no longer available. Schedule a successor that's now
1114 // available instead.
1115 if (!TrySU->isAvailable) {
1116 CurSU = AvailableQueue->pop();
1117 }
1118 else {
1119 CurSU = TrySU;
1120 TrySU->isPending = false;
1121 Interferences.erase(Interferences.begin()+i);
1122 }
1123 break;
1124 }
1125 }
1126
1127 if (!CurSU) {
1128 // Can't backtrack. If it's too expensive to copy the value, then try to
1129 // duplicate the nodes that produce these "too expensive to copy"
1130 // values to break the dependency. If even that doesn't work,
1131 // insert cross class copies.
1132 // If it's not too expensive, i.e. cost != -1, issue copies.
1133 SUnit *TrySU = Interferences[0];
1134 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1135 assert(LRegs.size() == 1 && "Can't handle this yet!");
1136 unsigned Reg = LRegs[0];
1137 SUnit *LRDef = LiveRegDefs[Reg];
1138 EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
1139 const TargetRegisterClass *RC =
1140 TRI->getMinimalPhysRegClass(Reg, VT);
1141 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1142
1143 // If the cross copy register class is null, then it must be possible to
1144 // copy the value directly. Do not try to duplicate the def.
1145 SUnit *NewDef = 0;
1146 if (DestRC)
1147 NewDef = CopyAndMoveSuccessors(LRDef);
1148 else
1149 DestRC = RC;
1150 if (!NewDef) {
1151 // Issue copies; these can be expensive cross register class copies.
1152 SmallVector<SUnit*, 2> Copies;
1153 InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1154 DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
1155 << " to SU #" << Copies.front()->NodeNum << "\n");
1156 AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
1157 /*Reg=*/0, /*isNormalMemory=*/false,
1158 /*isMustAlias=*/false,
1159 /*isArtificial=*/true));
1160 NewDef = Copies.back();
1161 }
1162
1163 DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
1164 << " to SU #" << TrySU->NodeNum << "\n");
1165 LiveRegDefs[Reg] = NewDef;
1166 AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
1167 /*Reg=*/0, /*isNormalMemory=*/false,
1168 /*isMustAlias=*/false,
1169 /*isArtificial=*/true));
1170 TrySU->isAvailable = false;
1171 CurSU = NewDef;
1172 }
1173
1174 assert(CurSU && "Unable to resolve live physical register dependencies!");
1175
1176 // Add the nodes that aren't ready back onto the available list.
1177 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1178 Interferences[i]->isPending = false;
1179 // May no longer be available due to backtracking.
1180 if (Interferences[i]->isAvailable) {
1181 AvailableQueue->push(Interferences[i]);
1182 }
1183 }
1184 return CurSU;
1185}
Evan Cheng1ec79b42007-09-27 07:09:03 +00001186
Evan Chengd38c22b2006-05-11 23:55:42 +00001187/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
1188/// schedulers.
1189void ScheduleDAGRRList::ListScheduleBottomUp() {
Dan Gohmanb9543432009-02-10 23:27:53 +00001190 // Release any predecessors of the special Exit node.
Andrew Tricka52f3252010-12-23 04:16:14 +00001191 ReleasePredecessors(&ExitSU);
Dan Gohmanb9543432009-02-10 23:27:53 +00001192
Evan Chengd38c22b2006-05-11 23:55:42 +00001193 // Add root to Available queue.
Dan Gohman4370f262008-04-15 01:22:18 +00001194 if (!SUnits.empty()) {
Dan Gohman5a390b92008-11-13 21:21:28 +00001195 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
Dan Gohman4370f262008-04-15 01:22:18 +00001196 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1197 RootSU->isAvailable = true;
1198 AvailableQueue->push(RootSU);
1199 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001200
1201 // While Available queue is not empty, grab the node with the highest
Dan Gohman54a187e2007-08-20 19:28:38 +00001202 // priority. If it is not ready, put it back. Schedule the node.
Dan Gohmane6e13482008-06-21 15:52:51 +00001203 Sequence.reserve(SUnits.size());
Evan Chengd38c22b2006-05-11 23:55:42 +00001204 while (!AvailableQueue->empty()) {
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001205 DEBUG(dbgs() << "\n*** Examining Available\n";
1206 AvailableQueue->dump(this));
1207
Andrew Trick528fad92010-12-23 05:42:20 +00001208 // Pick the best node to schedule taking all constraints into
1209 // consideration.
1210 SUnit *SU = PickNodeToScheduleBottomUp();
Evan Cheng1ec79b42007-09-27 07:09:03 +00001211
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001212 AdvancePastStalls(SU);
Evan Cheng1ec79b42007-09-27 07:09:03 +00001213
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001214 ScheduleNodeBottomUp(SU);
1215
1216 while (AvailableQueue->empty() && !PendingQueue.empty()) {
1217 // Advance the cycle to free resources. Skip ahead to the next ready SU.
1218 assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1219 AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1220 }
Evan Chengd38c22b2006-05-11 23:55:42 +00001221 }
1222
Evan Chengd38c22b2006-05-11 23:55:42 +00001223 // Reverse the order since this is a bottom-up schedule.
1224 std::reverse(Sequence.begin(), Sequence.end());
Andrew Trick2085a962010-12-21 22:25:04 +00001225
Evan Chengd38c22b2006-05-11 23:55:42 +00001226#ifndef NDEBUG
Dan Gohman4ce15e12008-11-20 01:26:25 +00001227 VerifySchedule(isBottomUp);
Evan Chengd38c22b2006-05-11 23:55:42 +00001228#endif
1229}
1230
1231//===----------------------------------------------------------------------===//
1232// Top-Down Scheduling
1233//===----------------------------------------------------------------------===//
1234
1235/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
Dan Gohman54a187e2007-08-20 19:28:38 +00001236/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
Dan Gohman60d68442009-01-29 19:49:27 +00001237void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
Dan Gohman2d170892008-12-09 22:54:47 +00001238 SUnit *SuccSU = SuccEdge->getSUnit();
Reid Kleckner8ff5c192009-09-30 20:15:38 +00001239
Evan Chengd38c22b2006-05-11 23:55:42 +00001240#ifndef NDEBUG
Reid Kleckner8ff5c192009-09-30 20:15:38 +00001241 if (SuccSU->NumPredsLeft == 0) {
David Greenef34d7ac2010-01-05 01:24:54 +00001242 dbgs() << "*** Scheduling failed! ***\n";
Dan Gohman22d07b12008-11-18 02:06:40 +00001243 SuccSU->dump(this);
David Greenef34d7ac2010-01-05 01:24:54 +00001244 dbgs() << " has been released too many times!\n";
Torok Edwinfbcc6632009-07-14 16:55:14 +00001245 llvm_unreachable(0);
Evan Chengd38c22b2006-05-11 23:55:42 +00001246 }
1247#endif
Reid Kleckner8ff5c192009-09-30 20:15:38 +00001248 --SuccSU->NumPredsLeft;
1249
Dan Gohmanb9543432009-02-10 23:27:53 +00001250 // If all the node's predecessors are scheduled, this node is ready
1251 // to be scheduled. Ignore the special ExitSU node.
1252 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
Evan Chengd38c22b2006-05-11 23:55:42 +00001253 SuccSU->isAvailable = true;
1254 AvailableQueue->push(SuccSU);
1255 }
1256}
1257
Dan Gohmanb9543432009-02-10 23:27:53 +00001258void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
1259 // Top down: release successors
1260 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1261 I != E; ++I) {
1262 assert(!I->isAssignedRegDep() &&
1263 "The list-tdrr scheduler doesn't yet support physreg dependencies!");
1264
1265 ReleaseSucc(SU, &*I);
1266 }
1267}
1268
Evan Chengd38c22b2006-05-11 23:55:42 +00001269/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1270/// count of its successors. If a successor pending count is zero, add it to
1271/// the Available queue.
Andrew Trick528fad92010-12-23 05:42:20 +00001272void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
David Greenef34d7ac2010-01-05 01:24:54 +00001273 DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
Dan Gohman22d07b12008-11-18 02:06:40 +00001274 DEBUG(SU->dump(this));
Evan Chengd38c22b2006-05-11 23:55:42 +00001275
Dan Gohmandddc1ac2008-12-16 03:25:46 +00001276 assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
1277 SU->setDepthToAtLeast(CurCycle);
Dan Gohman92a36d72008-11-17 21:31:02 +00001278 Sequence.push_back(SU);
Evan Chengd38c22b2006-05-11 23:55:42 +00001279
Dan Gohmanb9543432009-02-10 23:27:53 +00001280 ReleaseSuccessors(SU);
Evan Chengd38c22b2006-05-11 23:55:42 +00001281 SU->isScheduled = true;
Dan Gohman92a36d72008-11-17 21:31:02 +00001282 AvailableQueue->ScheduledNode(SU);
Evan Chengd38c22b2006-05-11 23:55:42 +00001283}
1284
Dan Gohman54a187e2007-08-20 19:28:38 +00001285/// ListScheduleTopDown - The main loop of list scheduling for top-down
1286/// schedulers.
Evan Chengd38c22b2006-05-11 23:55:42 +00001287void ScheduleDAGRRList::ListScheduleTopDown() {
Evan Chengbdd062d2010-05-20 06:13:19 +00001288 AvailableQueue->setCurCycle(CurCycle);
Evan Chengd38c22b2006-05-11 23:55:42 +00001289
Dan Gohmanb9543432009-02-10 23:27:53 +00001290 // Release any successors of the special Entry node.
1291 ReleaseSuccessors(&EntrySU);
1292
Evan Chengd38c22b2006-05-11 23:55:42 +00001293 // Add all leaves to the Available queue.
1294 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1295 // It is available if it has no predecessors.
Dan Gohman4370f262008-04-15 01:22:18 +00001296 if (SUnits[i].Preds.empty()) {
Evan Chengd38c22b2006-05-11 23:55:42 +00001297 AvailableQueue->push(&SUnits[i]);
1298 SUnits[i].isAvailable = true;
1299 }
1300 }
Andrew Trick2085a962010-12-21 22:25:04 +00001301
Evan Chengd38c22b2006-05-11 23:55:42 +00001302 // While Available queue is not empty, grab the node with the highest
Dan Gohman54a187e2007-08-20 19:28:38 +00001303 // priority. If it is not ready, put it back. Schedule the node.
Dan Gohmane6e13482008-06-21 15:52:51 +00001304 Sequence.reserve(SUnits.size());
Evan Chengd38c22b2006-05-11 23:55:42 +00001305 while (!AvailableQueue->empty()) {
Evan Cheng5924bf72007-09-25 01:54:36 +00001306 SUnit *CurSU = AvailableQueue->pop();
Andrew Trick2085a962010-12-21 22:25:04 +00001307
Dan Gohmanc602dd42008-11-21 00:10:42 +00001308 if (CurSU)
Andrew Trick528fad92010-12-23 05:42:20 +00001309 ScheduleNodeTopDown(CurSU);
Dan Gohman4370f262008-04-15 01:22:18 +00001310 ++CurCycle;
Evan Chengbdd062d2010-05-20 06:13:19 +00001311 AvailableQueue->setCurCycle(CurCycle);
Evan Chengd38c22b2006-05-11 23:55:42 +00001312 }
Andrew Trick2085a962010-12-21 22:25:04 +00001313
Evan Chengd38c22b2006-05-11 23:55:42 +00001314#ifndef NDEBUG
Dan Gohman4ce15e12008-11-20 01:26:25 +00001315 VerifySchedule(isBottomUp);
Evan Chengd38c22b2006-05-11 23:55:42 +00001316#endif
1317}
1318
1319
Evan Chengd38c22b2006-05-11 23:55:42 +00001320//===----------------------------------------------------------------------===//
Andrew Trick9ccce772011-01-14 21:11:41 +00001321// RegReductionPriorityQueue Definition
Evan Chengd38c22b2006-05-11 23:55:42 +00001322//===----------------------------------------------------------------------===//
1323//
1324// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1325// to reduce register pressure.
Andrew Trick2085a962010-12-21 22:25:04 +00001326//
Evan Chengd38c22b2006-05-11 23:55:42 +00001327namespace {
Andrew Trick9ccce772011-01-14 21:11:41 +00001328class RegReductionPQBase;
Andrew Trick2085a962010-12-21 22:25:04 +00001329
Andrew Trick9ccce772011-01-14 21:11:41 +00001330struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1331 bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1332};
1333
1334/// bu_ls_rr_sort - Priority function for bottom up register pressure
1335// reduction scheduler.
1336struct bu_ls_rr_sort : public queue_sort {
1337 enum {
1338 IsBottomUp = true,
1339 HasReadyFilter = false
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001340 };
1341
Andrew Trick9ccce772011-01-14 21:11:41 +00001342 RegReductionPQBase *SPQ;
1343 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1344 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001345
Andrew Trick9ccce772011-01-14 21:11:41 +00001346 bool operator()(SUnit* left, SUnit* right) const;
1347};
Andrew Trick2085a962010-12-21 22:25:04 +00001348
Andrew Trick9ccce772011-01-14 21:11:41 +00001349// td_ls_rr_sort - Priority function for top down register pressure reduction
1350// scheduler.
1351struct td_ls_rr_sort : public queue_sort {
1352 enum {
1353 IsBottomUp = false,
1354 HasReadyFilter = false
Evan Chengd38c22b2006-05-11 23:55:42 +00001355 };
1356
Andrew Trick9ccce772011-01-14 21:11:41 +00001357 RegReductionPQBase *SPQ;
1358 td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1359 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001360
Andrew Trick9ccce772011-01-14 21:11:41 +00001361 bool operator()(const SUnit* left, const SUnit* right) const;
1362};
Andrew Trick2085a962010-12-21 22:25:04 +00001363
Andrew Trick9ccce772011-01-14 21:11:41 +00001364// src_ls_rr_sort - Priority function for source order scheduler.
1365struct src_ls_rr_sort : public queue_sort {
1366 enum {
1367 IsBottomUp = true,
1368 HasReadyFilter = false
Evan Chengd38c22b2006-05-11 23:55:42 +00001369 };
Bill Wendling8cbc25d2010-01-23 10:26:57 +00001370
Andrew Trick9ccce772011-01-14 21:11:41 +00001371 RegReductionPQBase *SPQ;
1372 src_ls_rr_sort(RegReductionPQBase *spq)
1373 : SPQ(spq) {}
1374 src_ls_rr_sort(const src_ls_rr_sort &RHS)
1375 : SPQ(RHS.SPQ) {}
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001376
Andrew Trick9ccce772011-01-14 21:11:41 +00001377 bool operator()(SUnit* left, SUnit* right) const;
1378};
Andrew Trick2085a962010-12-21 22:25:04 +00001379
Andrew Trick9ccce772011-01-14 21:11:41 +00001380// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1381struct hybrid_ls_rr_sort : public queue_sort {
1382 enum {
1383 IsBottomUp = true,
Andrew Trickc88b7ec2011-03-04 02:03:45 +00001384 HasReadyFilter = false
Bill Wendling8cbc25d2010-01-23 10:26:57 +00001385 };
Evan Chengbdd062d2010-05-20 06:13:19 +00001386
Andrew Trick9ccce772011-01-14 21:11:41 +00001387 RegReductionPQBase *SPQ;
1388 hybrid_ls_rr_sort(RegReductionPQBase *spq)
1389 : SPQ(spq) {}
1390 hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
1391 : SPQ(RHS.SPQ) {}
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001392
Andrew Trick9ccce772011-01-14 21:11:41 +00001393 bool isReady(SUnit *SU, unsigned CurCycle) const;
Evan Chenga77f3d32010-07-21 06:09:07 +00001394
Andrew Trick9ccce772011-01-14 21:11:41 +00001395 bool operator()(SUnit* left, SUnit* right) const;
1396};
1397
1398// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1399// scheduler.
1400struct ilp_ls_rr_sort : public queue_sort {
1401 enum {
1402 IsBottomUp = true,
Andrew Trickc88b7ec2011-03-04 02:03:45 +00001403 HasReadyFilter = false
Evan Chengbdd062d2010-05-20 06:13:19 +00001404 };
Evan Cheng37b740c2010-07-24 00:39:05 +00001405
Andrew Trick9ccce772011-01-14 21:11:41 +00001406 RegReductionPQBase *SPQ;
1407 ilp_ls_rr_sort(RegReductionPQBase *spq)
1408 : SPQ(spq) {}
1409 ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
1410 : SPQ(RHS.SPQ) {}
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001411
Andrew Trick9ccce772011-01-14 21:11:41 +00001412 bool isReady(SUnit *SU, unsigned CurCycle) const;
Evan Cheng37b740c2010-07-24 00:39:05 +00001413
Andrew Trick9ccce772011-01-14 21:11:41 +00001414 bool operator()(SUnit* left, SUnit* right) const;
1415};
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001416
Andrew Trick9ccce772011-01-14 21:11:41 +00001417class RegReductionPQBase : public SchedulingPriorityQueue {
1418protected:
1419 std::vector<SUnit*> Queue;
1420 unsigned CurQueueId;
1421 bool TracksRegPressure;
1422
1423 // SUnits - The SUnits for the current graph.
1424 std::vector<SUnit> *SUnits;
1425
1426 MachineFunction &MF;
1427 const TargetInstrInfo *TII;
1428 const TargetRegisterInfo *TRI;
1429 const TargetLowering *TLI;
1430 ScheduleDAGRRList *scheduleDAG;
1431
1432 // SethiUllmanNumbers - The SethiUllman number for each node.
1433 std::vector<unsigned> SethiUllmanNumbers;
1434
1435 /// RegPressure - Tracking current reg pressure per register class.
1436 ///
1437 std::vector<unsigned> RegPressure;
1438
1439 /// RegLimit - Tracking the number of allocatable registers per register
1440 /// class.
1441 std::vector<unsigned> RegLimit;
1442
1443public:
1444 RegReductionPQBase(MachineFunction &mf,
1445 bool hasReadyFilter,
1446 bool tracksrp,
1447 const TargetInstrInfo *tii,
1448 const TargetRegisterInfo *tri,
1449 const TargetLowering *tli)
1450 : SchedulingPriorityQueue(hasReadyFilter),
1451 CurQueueId(0), TracksRegPressure(tracksrp),
1452 MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
1453 if (TracksRegPressure) {
1454 unsigned NumRC = TRI->getNumRegClasses();
1455 RegLimit.resize(NumRC);
1456 RegPressure.resize(NumRC);
1457 std::fill(RegLimit.begin(), RegLimit.end(), 0);
1458 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1459 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1460 E = TRI->regclass_end(); I != E; ++I)
1461 RegLimit[(*I)->getID()] = tli->getRegPressureLimit(*I, MF);
1462 }
1463 }
1464
1465 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1466 scheduleDAG = scheduleDag;
1467 }
1468
1469 ScheduleHazardRecognizer* getHazardRec() {
1470 return scheduleDAG->getHazardRec();
1471 }
1472
1473 void initNodes(std::vector<SUnit> &sunits);
1474
1475 void addNode(const SUnit *SU);
1476
1477 void updateNode(const SUnit *SU);
1478
1479 void releaseState() {
1480 SUnits = 0;
1481 SethiUllmanNumbers.clear();
1482 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1483 }
1484
1485 unsigned getNodePriority(const SUnit *SU) const;
1486
1487 unsigned getNodeOrdering(const SUnit *SU) const {
1488 return scheduleDAG->DAG->GetOrdering(SU->getNode());
1489 }
1490
1491 bool empty() const { return Queue.empty(); }
1492
1493 void push(SUnit *U) {
1494 assert(!U->NodeQueueId && "Node in the queue already");
1495 U->NodeQueueId = ++CurQueueId;
1496 Queue.push_back(U);
1497 }
1498
1499 void remove(SUnit *SU) {
1500 assert(!Queue.empty() && "Queue is empty!");
1501 assert(SU->NodeQueueId != 0 && "Not in queue!");
1502 std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1503 SU);
1504 if (I != prior(Queue.end()))
1505 std::swap(*I, Queue.back());
1506 Queue.pop_back();
1507 SU->NodeQueueId = 0;
1508 }
1509
Andrew Trickd0548ae2011-02-04 03:18:17 +00001510 bool tracksRegPressure() const { return TracksRegPressure; }
1511
Andrew Trick9ccce772011-01-14 21:11:41 +00001512 void dumpRegPressure() const;
1513
1514 bool HighRegPressure(const SUnit *SU) const;
1515
Andrew Trick641e2d42011-03-05 08:00:22 +00001516 bool MayReduceRegPressure(SUnit *SU) const;
1517
1518 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
Andrew Trick9ccce772011-01-14 21:11:41 +00001519
1520 void ScheduledNode(SUnit *SU);
1521
1522 void UnscheduledNode(SUnit *SU);
1523
1524protected:
1525 bool canClobber(const SUnit *SU, const SUnit *Op);
1526 void AddPseudoTwoAddrDeps();
1527 void PrescheduleNodesWithMultipleUses();
1528 void CalculateSethiUllmanNumbers();
1529};
1530
1531template<class SF>
1532class RegReductionPriorityQueue : public RegReductionPQBase {
1533 static SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker) {
1534 std::vector<SUnit *>::iterator Best = Q.begin();
1535 for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
1536 E = Q.end(); I != E; ++I)
1537 if (Picker(*Best, *I))
1538 Best = I;
1539 SUnit *V = *Best;
1540 if (Best != prior(Q.end()))
1541 std::swap(*Best, Q.back());
1542 Q.pop_back();
1543 return V;
1544 }
1545
1546 SF Picker;
1547
1548public:
1549 RegReductionPriorityQueue(MachineFunction &mf,
1550 bool tracksrp,
1551 const TargetInstrInfo *tii,
1552 const TargetRegisterInfo *tri,
1553 const TargetLowering *tli)
1554 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
1555 Picker(this) {}
1556
1557 bool isBottomUp() const { return SF::IsBottomUp; }
1558
1559 bool isReady(SUnit *U) const {
1560 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1561 }
1562
1563 SUnit *pop() {
1564 if (Queue.empty()) return NULL;
1565
1566 SUnit *V = popFromQueue(Queue, Picker);
1567 V->NodeQueueId = 0;
1568 return V;
1569 }
1570
1571 void dump(ScheduleDAG *DAG) const {
1572 // Emulate pop() without clobbering NodeQueueIds.
1573 std::vector<SUnit*> DumpQueue = Queue;
1574 SF DumpPicker = Picker;
1575 while (!DumpQueue.empty()) {
1576 SUnit *SU = popFromQueue(DumpQueue, DumpPicker);
1577 if (isBottomUp())
1578 dbgs() << "Height " << SU->getHeight() << ": ";
1579 else
1580 dbgs() << "Depth " << SU->getDepth() << ": ";
1581 SU->dump(DAG);
1582 }
1583 }
1584};
1585
1586typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1587BURegReductionPriorityQueue;
1588
1589typedef RegReductionPriorityQueue<td_ls_rr_sort>
1590TDRegReductionPriorityQueue;
1591
1592typedef RegReductionPriorityQueue<src_ls_rr_sort>
1593SrcRegReductionPriorityQueue;
1594
1595typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1596HybridBURRPriorityQueue;
1597
1598typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1599ILPBURRPriorityQueue;
1600} // end anonymous namespace
1601
1602//===----------------------------------------------------------------------===//
1603// Static Node Priority for Register Pressure Reduction
1604//===----------------------------------------------------------------------===//
Evan Chengd38c22b2006-05-11 23:55:42 +00001605
Dan Gohman186f65d2008-11-20 03:30:37 +00001606/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
1607/// Smaller number is the higher priority.
Evan Cheng7e4abde2008-07-02 09:23:51 +00001608static unsigned
Dan Gohman186f65d2008-11-20 03:30:37 +00001609CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
Evan Cheng7e4abde2008-07-02 09:23:51 +00001610 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1611 if (SethiUllmanNumber != 0)
1612 return SethiUllmanNumber;
1613
1614 unsigned Extra = 0;
1615 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1616 I != E; ++I) {
Dan Gohman2d170892008-12-09 22:54:47 +00001617 if (I->isCtrl()) continue; // ignore chain preds
1618 SUnit *PredSU = I->getSUnit();
Dan Gohman186f65d2008-11-20 03:30:37 +00001619 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
Evan Cheng7e4abde2008-07-02 09:23:51 +00001620 if (PredSethiUllman > SethiUllmanNumber) {
1621 SethiUllmanNumber = PredSethiUllman;
1622 Extra = 0;
Evan Cheng3a14efa2009-02-12 08:59:45 +00001623 } else if (PredSethiUllman == SethiUllmanNumber)
Evan Cheng7e4abde2008-07-02 09:23:51 +00001624 ++Extra;
1625 }
1626
1627 SethiUllmanNumber += Extra;
1628
1629 if (SethiUllmanNumber == 0)
1630 SethiUllmanNumber = 1;
Andrew Trick2085a962010-12-21 22:25:04 +00001631
Evan Cheng7e4abde2008-07-02 09:23:51 +00001632 return SethiUllmanNumber;
1633}
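// Illustrative walk-through of the recurrence above (not from the original
// comments): a leaf SUnit with no data preds ends up with a Sethi-Ullman
// number of 1. A node whose two data preds are both numbered 1 takes the
// max (1) plus one for each extra pred that ties it, giving 2, while a node
// whose preds are numbered 2 and 1 simply keeps the max, 2.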
1634
Andrew Trick9ccce772011-01-14 21:11:41 +00001635/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1636/// scheduling units.
1637void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1638 SethiUllmanNumbers.assign(SUnits->size(), 0);
Andrew Trick10ffc2b2010-12-24 05:03:26 +00001639
Andrew Trick9ccce772011-01-14 21:11:41 +00001640 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1641 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
Evan Chengd38c22b2006-05-11 23:55:42 +00001642}
1643
Andrew Trick9ccce772011-01-14 21:11:41 +00001644void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
1645 SUnits = &sunits;
1646 // Add pseudo dependency edges for two-address nodes.
1647 AddPseudoTwoAddrDeps();
1648 // Reroute edges to nodes with multiple uses.
Andrew Trickd0548ae2011-02-04 03:18:17 +00001649 if (!TracksRegPressure)
1650 PrescheduleNodesWithMultipleUses();
Andrew Trick9ccce772011-01-14 21:11:41 +00001651 // Calculate node priorities.
1652 CalculateSethiUllmanNumbers();
1653}
1654
1655void RegReductionPQBase::addNode(const SUnit *SU) {
1656 unsigned SUSize = SethiUllmanNumbers.size();
1657 if (SUnits->size() > SUSize)
1658 SethiUllmanNumbers.resize(SUSize*2, 0);
1659 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1660}
1661
1662void RegReductionPQBase::updateNode(const SUnit *SU) {
1663 SethiUllmanNumbers[SU->NodeNum] = 0;
1664 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1665}
1666
Andrew Trick2cd1f0b2011-01-20 06:21:59 +00001667// Lower priority means schedule further down. For bottom-up scheduling, lower
1668// priority SUs are scheduled before higher priority SUs.
Andrew Trick9ccce772011-01-14 21:11:41 +00001669unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1670 assert(SU->NodeNum < SethiUllmanNumbers.size());
1671 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1672 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1673 // CopyToReg should be close to its uses to facilitate coalescing and
1674 // avoid spilling.
1675 return 0;
1676 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1677 Opc == TargetOpcode::SUBREG_TO_REG ||
1678 Opc == TargetOpcode::INSERT_SUBREG)
1679 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1680 // close to their uses to facilitate coalescing.
1681 return 0;
1682 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1683 // If SU does not have a register use, i.e. it doesn't produce a value
1684 // that would be consumed (e.g. store), then it terminates a chain of
1685 // computation. Give it a large SethiUllman number so it will be
1686 // scheduled right before its predecessors that it doesn't lengthen
1687 // their live ranges.
1688 return 0xffff;
1689 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1690 // If SU does not have a register def, schedule it close to its uses
1691 // because it does not lengthen any live ranges.
1692 return 0;
1693 return SethiUllmanNumbers[SU->NodeNum];
1694}
1695
1696//===----------------------------------------------------------------------===//
1697// Register Pressure Tracking
1698//===----------------------------------------------------------------------===//
1699
1700void RegReductionPQBase::dumpRegPressure() const {
1701 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1702 E = TRI->regclass_end(); I != E; ++I) {
1703 const TargetRegisterClass *RC = *I;
1704 unsigned Id = RC->getID();
1705 unsigned RP = RegPressure[Id];
1706 if (!RP) continue;
1707 DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1708 << '\n');
1709 }
1710}
1711
1712bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1713 if (!TLI)
1714 return false;
1715
1716 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1717 I != E; ++I) {
1718 if (I->isCtrl())
1719 continue;
1720 SUnit *PredSU = I->getSUnit();
Andrew Trickd0548ae2011-02-04 03:18:17 +00001721 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1722 // to cover the number of registers defined (they are all live).
1723 if (PredSU->NumRegDefsLeft == 0) {
Andrew Trick2cd1f0b2011-01-20 06:21:59 +00001724 continue;
1725 }
Andrew Trickd0548ae2011-02-04 03:18:17 +00001726 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1727 RegDefPos.IsValid(); RegDefPos.Advance()) {
1728 EVT VT = RegDefPos.GetValue();
Andrew Trick9ccce772011-01-14 21:11:41 +00001729 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1730 unsigned Cost = TLI->getRepRegClassCostFor(VT);
Andrew Trick9ccce772011-01-14 21:11:41 +00001731 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1732 return true;
1733 }
1734 }
Andrew Trick9ccce772011-01-14 21:11:41 +00001735 return false;
1736}
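// Rough illustration of the check above, with assumed numbers: if a register
// class has a RegLimit of 8 and RegPressure is already 7, then a data pred
// with a pending def of cost 1 in that class makes 7 + 1 >= 8, so this node
// is treated as scheduling into high register pressure.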
1737
Andrew Trick641e2d42011-03-05 08:00:22 +00001738bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
Andrew Trick9ccce772011-01-14 21:11:41 +00001739 const SDNode *N = SU->getNode();
1740
1741 if (!N->isMachineOpcode() || !SU->NumSuccs)
1742 return false;
1743
1744 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1745 for (unsigned i = 0; i != NumDefs; ++i) {
1746 EVT VT = N->getValueType(i);
1747 if (!N->hasAnyUseOfValue(i))
1748 continue;
1749 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1750 if (RegPressure[RCId] >= RegLimit[RCId])
1751 return true;
1752 }
1753 return false;
1754}
1755
Andrew Trick641e2d42011-03-05 08:00:22 +00001756// Compute the register pressure contribution of this instruction by counting up
1757// for uses that are not live and down for defs. Only count register classes
1758// that are already under high pressure. As a side effect, compute the number of
1759// uses of registers that are already live.
1760//
1761// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1762// so could probably be factored.
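// Illustrative sketch (assumed numbers): each def a data pred still has to
// produce in a class that is already at or over its limit counts +1, and
// each value this node itself defines in such a class counts -1. Two of the
// former and one of the latter give PDiff = 1; preds whose defs are all
// live already only bump LiveUses.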
1763int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1764 LiveUses = 0;
1765 int PDiff = 0;
1766 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1767 I != E; ++I) {
1768 if (I->isCtrl())
1769 continue;
1770 SUnit *PredSU = I->getSUnit();
1771 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1772 // to cover the number of registers defined (they are all live).
1773 if (PredSU->NumRegDefsLeft == 0) {
1774 if (PredSU->getNode()->isMachineOpcode())
1775 ++LiveUses;
1776 continue;
1777 }
1778 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1779 RegDefPos.IsValid(); RegDefPos.Advance()) {
1780 EVT VT = RegDefPos.GetValue();
1781 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1782 if (RegPressure[RCId] >= RegLimit[RCId])
1783 ++PDiff;
1784 }
1785 }
1786 const SDNode *N = SU->getNode();
1787
1788 if (!N->isMachineOpcode() || !SU->NumSuccs)
1789 return PDiff;
1790
1791 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1792 for (unsigned i = 0; i != NumDefs; ++i) {
1793 EVT VT = N->getValueType(i);
1794 if (!N->hasAnyUseOfValue(i))
1795 continue;
1796 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1797 if (RegPressure[RCId] >= RegLimit[RCId])
1798 --PDiff;
1799 }
1800 return PDiff;
1801}
1802
Andrew Trick9ccce772011-01-14 21:11:41 +00001803void RegReductionPQBase::ScheduledNode(SUnit *SU) {
1804 if (!TracksRegPressure)
1805 return;
1806
Andrew Trick9ccce772011-01-14 21:11:41 +00001807 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1808 I != E; ++I) {
1809 if (I->isCtrl())
1810 continue;
1811 SUnit *PredSU = I->getSUnit();
Andrew Trickd0548ae2011-02-04 03:18:17 +00001812 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1813 // to cover the number of registers defined (they are all live).
1814 if (PredSU->NumRegDefsLeft == 0) {
Andrew Trick9ccce772011-01-14 21:11:41 +00001815 continue;
1816 }
Andrew Trickd0548ae2011-02-04 03:18:17 +00001817 // FIXME: The ScheduleDAG currently loses information about which of a
1818 // node's values is consumed by each dependence. Consequently, if the node
1819 // defines multiple register classes, we don't know which to pressurize
1820 // here. Instead the following loop consumes the register defs in an
1821 // arbitrary order. At least it handles the common case of clustered loads
1822 // to the same class. For precise liveness, each SDep needs to indicate the
1823 // result number. But that tightly couples the ScheduleDAG with the
1824 // SelectionDAG making updates tricky. A simpler hack would be to attach a
1825 // value type or register class to SDep.
1826 //
1827 // The most important aspect of register tracking is balancing the increase
1828 // here with the reduction further below. Note that this SU may use multiple
1829 // defs in PredSU. They can't be determined here, but we've already
1830 // compensated by reducing NumRegDefsLeft in PredSU during
1831 // ScheduleDAGSDNodes::AddSchedEdges.
1832 --PredSU->NumRegDefsLeft;
1833 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
1834 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1835 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
1836 if (SkipRegDefs)
Andrew Trick9ccce772011-01-14 21:11:41 +00001837 continue;
Andrew Trickd0548ae2011-02-04 03:18:17 +00001838 EVT VT = RegDefPos.GetValue();
Andrew Trick9ccce772011-01-14 21:11:41 +00001839 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1840 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
Andrew Trickd0548ae2011-02-04 03:18:17 +00001841 break;
Andrew Trick9ccce772011-01-14 21:11:41 +00001842 }
1843 }
1844
Andrew Trickd0548ae2011-02-04 03:18:17 +00001845 // We should have this assert, but there may be dead SDNodes that never
1846 // materialize as SUnits, so they don't appear to generate liveness.
1847 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
1848 int SkipRegDefs = (int)SU->NumRegDefsLeft;
1849 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
1850 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
1851 if (SkipRegDefs > 0)
1852 continue;
1853 EVT VT = RegDefPos.GetValue();
1854 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1855 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT)) {
1856 // Register pressure tracking is imprecise. This can happen. But we try
1857 // hard not to let it happen because it likely results in poor scheduling.
1858 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
1859 RegPressure[RCId] = 0;
1860 }
1861 else {
1862 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
Andrew Trick9ccce772011-01-14 21:11:41 +00001863 }
1864 }
Andrew Trick9ccce772011-01-14 21:11:41 +00001865 dumpRegPressure();
1866}
1867
1868void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
1869 if (!TracksRegPressure)
1870 return;
1871
1872 const SDNode *N = SU->getNode();
1873 if (!N->isMachineOpcode()) {
1874 if (N->getOpcode() != ISD::CopyToReg)
1875 return;
1876 } else {
1877 unsigned Opc = N->getMachineOpcode();
1878 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1879 Opc == TargetOpcode::INSERT_SUBREG ||
1880 Opc == TargetOpcode::SUBREG_TO_REG ||
1881 Opc == TargetOpcode::REG_SEQUENCE ||
1882 Opc == TargetOpcode::IMPLICIT_DEF)
1883 return;
1884 }
1885
1886 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1887 I != E; ++I) {
1888 if (I->isCtrl())
1889 continue;
1890 SUnit *PredSU = I->getSUnit();
Andrew Trick2cd1f0b2011-01-20 06:21:59 +00001891 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
1892 // counts data deps.
1893 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
Andrew Trick9ccce772011-01-14 21:11:41 +00001894 continue;
1895 const SDNode *PN = PredSU->getNode();
1896 if (!PN->isMachineOpcode()) {
1897 if (PN->getOpcode() == ISD::CopyFromReg) {
1898 EVT VT = PN->getValueType(0);
1899 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1900 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
1901 }
1902 continue;
1903 }
1904 unsigned POpc = PN->getMachineOpcode();
1905 if (POpc == TargetOpcode::IMPLICIT_DEF)
1906 continue;
1907 if (POpc == TargetOpcode::EXTRACT_SUBREG) {
1908 EVT VT = PN->getOperand(0).getValueType();
1909 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1910 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
1911 continue;
1912 } else if (POpc == TargetOpcode::INSERT_SUBREG ||
1913 POpc == TargetOpcode::SUBREG_TO_REG) {
1914 EVT VT = PN->getValueType(0);
1915 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1916 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
1917 continue;
1918 }
1919 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
1920 for (unsigned i = 0; i != NumDefs; ++i) {
1921 EVT VT = PN->getValueType(i);
1922 if (!PN->hasAnyUseOfValue(i))
1923 continue;
1924 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1925 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
1926 // Register pressure tracking is imprecise. This can happen.
1927 RegPressure[RCId] = 0;
1928 else
1929 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
1930 }
1931 }
1932
1933 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
1934 // may transfer data dependencies to CopyToReg.
1935 if (SU->NumSuccs && N->isMachineOpcode()) {
1936 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1937 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
1938 EVT VT = N->getValueType(i);
1939 if (VT == MVT::Glue || VT == MVT::Other)
1940 continue;
1941 if (!N->hasAnyUseOfValue(i))
1942 continue;
1943 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1944 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
1945 }
1946 }
1947
1948 dumpRegPressure();
1949}
1950
1951//===----------------------------------------------------------------------===//
1952// Dynamic Node Priority for Register Pressure Reduction
1953//===----------------------------------------------------------------------===//
1954
Evan Chengb9e3db62007-03-14 22:43:40 +00001955/// closestSucc - Returns the scheduled cycle of the successor which is
Dan Gohmana19c6622009-03-12 23:55:10 +00001956/// closest to the current cycle.
Evan Cheng28748552007-03-13 23:25:11 +00001957static unsigned closestSucc(const SUnit *SU) {
Dan Gohmandddc1ac2008-12-16 03:25:46 +00001958 unsigned MaxHeight = 0;
Evan Cheng28748552007-03-13 23:25:11 +00001959 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
Evan Chengb9e3db62007-03-14 22:43:40 +00001960 I != E; ++I) {
Evan Chengce3bbe52009-02-10 08:30:11 +00001961 if (I->isCtrl()) continue; // ignore chain succs
Dan Gohmandddc1ac2008-12-16 03:25:46 +00001962 unsigned Height = I->getSUnit()->getHeight();
Evan Chengb9e3db62007-03-14 22:43:40 +00001963 // If there are a bunch of CopyToRegs stacked up, they should be considered
1964 // to be at the same position.
Dan Gohman2d170892008-12-09 22:54:47 +00001965 if (I->getSUnit()->getNode() &&
1966 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
Dan Gohmandddc1ac2008-12-16 03:25:46 +00001967 Height = closestSucc(I->getSUnit())+1;
1968 if (Height > MaxHeight)
1969 MaxHeight = Height;
Evan Chengb9e3db62007-03-14 22:43:40 +00001970 }
Dan Gohmandddc1ac2008-12-16 03:25:46 +00001971 return MaxHeight;
Evan Cheng28748552007-03-13 23:25:11 +00001972}
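// For example (illustrative): if SU's only data successor sits at height 3,
// closestSucc(SU) returns 3. When a successor is a CopyToReg, the walk looks
// through the copy to that copy's own closest successor and adds one.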
1973
Evan Cheng61bc51e2007-12-20 02:22:36 +00001974/// calcMaxScratches - Returns a cost estimate of the worst case requirement
Evan Cheng3a14efa2009-02-12 08:59:45 +00001975/// for scratch registers, i.e. number of data dependencies.
Evan Cheng61bc51e2007-12-20 02:22:36 +00001976static unsigned calcMaxScratches(const SUnit *SU) {
1977 unsigned Scratches = 0;
1978 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
Evan Chengb5704992009-02-12 09:52:13 +00001979 I != E; ++I) {
Dan Gohman2d170892008-12-09 22:54:47 +00001980 if (I->isCtrl()) continue; // ignore chain preds
Evan Chengb5704992009-02-12 09:52:13 +00001981 Scratches++;
1982 }
Evan Cheng61bc51e2007-12-20 02:22:36 +00001983 return Scratches;
1984}
1985
Evan Cheng6c1414f2010-10-29 18:09:28 +00001986/// hasOnlyLiveOutUses - Return true if SU has a single value successor that is a
1987/// CopyToReg to a virtual register. This SU def is probably a liveout and
1988/// it has no other use. It should be scheduled closer to the terminator.
1989static bool hasOnlyLiveOutUses(const SUnit *SU) {
1990 bool RetVal = false;
1991 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1992 I != E; ++I) {
1993 if (I->isCtrl()) continue;
1994 const SUnit *SuccSU = I->getSUnit();
1995 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
1996 unsigned Reg =
1997 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
1998 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
1999 RetVal = true;
2000 continue;
2001 }
2002 }
2003 return false;
2004 }
2005 return RetVal;
2006}
2007
2008/// UnitsSharePred - Return true if the two scheduling units share a common
2009/// data predecessor.
2010static bool UnitsSharePred(const SUnit *left, const SUnit *right) {
2011 SmallSet<const SUnit*, 4> Preds;
2012 for (SUnit::const_pred_iterator I = left->Preds.begin(),E = left->Preds.end();
2013 I != E; ++I) {
2014 if (I->isCtrl()) continue; // ignore chain preds
2015 Preds.insert(I->getSUnit());
2016 }
2017 for (SUnit::const_pred_iterator I = right->Preds.begin(),E = right->Preds.end();
2018 I != E; ++I) {
2019 if (I->isCtrl()) continue; // ignore chain preds
2020 if (Preds.count(I->getSUnit()))
2021 return true;
2022 }
2023 return false;
2024}
2025
Andrew Trick9ccce772011-01-14 21:11:41 +00002026// Check for either a dependence (latency) or resource (hazard) stall.
2027//
2028// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2029static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2030 if ((int)SPQ->getCurCycle() < Height) return true;
2031 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2032 != ScheduleHazardRecognizer::NoHazard)
2033 return true;
2034 return false;
2035}
2036
2037// Return -1 if left has higher priority, 1 if right has higher priority.
2038// Return 0 if latency-based priority is equivalent.
2039static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2040 RegReductionPQBase *SPQ) {
2041 // If the two nodes share an operand and one of them has a single
2042 // use that is a live out copy, favor the one that is live out. Otherwise
2043 // it will be difficult to eliminate the copy if the instruction is a
2044 // loop induction variable update. e.g.
2045 // BB:
2046 // sub r1, r3, #1
2047 // str r0, [r2, r3]
2048 // mov r3, r1
2049 // cmp
2050 // bne BB
2051 bool SharePred = UnitsSharePred(left, right);
2052 // FIXME: Only adjust if BB is a loop back edge.
2053 // FIXME: What's the cost of a copy?
2054 int LBonus = (SharePred && hasOnlyLiveOutUses(left)) ? 1 : 0;
2055 int RBonus = (SharePred && hasOnlyLiveOutUses(right)) ? 1 : 0;
2056 int LHeight = (int)left->getHeight() - LBonus;
2057 int RHeight = (int)right->getHeight() - RBonus;
2058
2059 bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
2060 BUHasStall(left, LHeight, SPQ);
2061 bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
2062 BUHasStall(right, RHeight, SPQ);
2063
2064 // If scheduling one of the nodes will cause a pipeline stall, delay it.
2065 // If scheduling both of the nodes will cause pipeline stalls, sort
2066 // them according to their height.
2067 if (LStall) {
2068 if (!RStall)
2069 return 1;
2070 if (LHeight != RHeight)
2071 return LHeight > RHeight ? 1 : -1;
2072 } else if (RStall)
2073 return -1;
2074
Andrew Trick47ff14b2011-01-21 05:51:33 +00002075 // If either node is scheduling for latency, sort them by height/depth
Andrew Trick9ccce772011-01-14 21:11:41 +00002076 // and latency.
2077 if (!checkPref || (left->SchedulingPref == Sched::Latency ||
2078 right->SchedulingPref == Sched::Latency)) {
Andrew Trick47ff14b2011-01-21 05:51:33 +00002079 if (DisableSchedCycles) {
Andrew Trick9ccce772011-01-14 21:11:41 +00002080 if (LHeight != RHeight)
2081 return LHeight > RHeight ? 1 : -1;
2082 }
Andrew Trick47ff14b2011-01-21 05:51:33 +00002083 else {
2084 // If neither instruction stalls (!LStall && !RStall) then
Eric Christopher9cb33de2011-03-06 21:13:45 +00002085 // their heights are already covered, so only their depths matter. We also reach
Andrew Trick47ff14b2011-01-21 05:51:33 +00002086 // this if both stall but have the same height.
2087 unsigned LDepth = left->getDepth();
2088 unsigned RDepth = right->getDepth();
2089 if (LDepth != RDepth) {
2090 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2091 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2092 << ") depth " << RDepth << "\n");
2093 return LDepth < RDepth ? 1 : -1;
2094 }
2095 }
Andrew Trick9ccce772011-01-14 21:11:41 +00002096 if (left->Latency != right->Latency)
2097 return left->Latency > right->Latency ? 1 : -1;
2098 }
2099 return 0;
2100}
2101
2102static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
Evan Cheng6730f032007-01-08 23:55:53 +00002103 unsigned LPriority = SPQ->getNodePriority(left);
2104 unsigned RPriority = SPQ->getNodePriority(right);
Andrew Trick641e2d42011-03-05 08:00:22 +00002105 if (LPriority != RPriority) {
2106 DEBUG(++FactorCount[FactUllman]);
Evan Cheng73bdf042008-03-01 00:39:47 +00002107 return LPriority > RPriority;
Andrew Trick641e2d42011-03-05 08:00:22 +00002108 }
Evan Cheng73bdf042008-03-01 00:39:47 +00002109 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
2110 // e.g.
2111 // t1 = op t2, c1
2112 // t3 = op t4, c2
2113 //
2114 // and the following instructions are both ready.
2115 // t2 = op c3
2116 // t4 = op c4
2117 //
2118 // Then schedule t2 = op first.
2119 // i.e.
2120 // t4 = op c4
2121 // t2 = op c3
2122 // t1 = op t2, c1
2123 // t3 = op t4, c2
2124 //
2125 // This creates more short live intervals.
2126 unsigned LDist = closestSucc(left);
2127 unsigned RDist = closestSucc(right);
2128 if (LDist != RDist)
2129 return LDist < RDist;
2130
Evan Cheng3a14efa2009-02-12 08:59:45 +00002131 // How many registers become live when the node is scheduled.
Evan Cheng73bdf042008-03-01 00:39:47 +00002132 unsigned LScratch = calcMaxScratches(left);
2133 unsigned RScratch = calcMaxScratches(right);
2134 if (LScratch != RScratch)
2135 return LScratch > RScratch;
2136
Andrew Trick47ff14b2011-01-21 05:51:33 +00002137 if (!DisableSchedCycles) {
Andrew Trick9ccce772011-01-14 21:11:41 +00002138 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2139 if (result != 0)
2140 return result > 0;
2141 }
2142 else {
2143 if (left->getHeight() != right->getHeight())
2144 return left->getHeight() > right->getHeight();
Andrew Trick2085a962010-12-21 22:25:04 +00002145
Andrew Trick9ccce772011-01-14 21:11:41 +00002146 if (left->getDepth() != right->getDepth())
2147 return left->getDepth() < right->getDepth();
2148 }
Evan Cheng73bdf042008-03-01 00:39:47 +00002149
Andrew Trick2085a962010-12-21 22:25:04 +00002150 assert(left->NodeQueueId && right->NodeQueueId &&
Roman Levenstein6b371142008-04-29 09:07:59 +00002151 "NodeQueueId cannot be zero");
2152 return (left->NodeQueueId > right->NodeQueueId);
Evan Chengd38c22b2006-05-11 23:55:42 +00002153}
2154
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002155// Bottom up
Andrew Trick9ccce772011-01-14 21:11:41 +00002156bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002157 return BURRSort(left, right, SPQ);
2158}
2159
2160// Source order, otherwise bottom up.
Andrew Trick9ccce772011-01-14 21:11:41 +00002161bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002162 unsigned LOrder = SPQ->getNodeOrdering(left);
2163 unsigned ROrder = SPQ->getNodeOrdering(right);
2164
2165 // Prefer an ordering where the lower the non-zero order number, the higher
2166 // the preference.
2167 if ((LOrder || ROrder) && LOrder != ROrder)
2168 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2169
2170 return BURRSort(left, right, SPQ);
2171}
2172
Andrew Trick9ccce772011-01-14 21:11:41 +00002173// If the time between now and when the instruction will be ready can cover
2174// the spill code, then avoid adding it to the ready queue. This gives long
2175// stalls highest priority and allows hoisting across calls. It should also
2176// speed up processing the available queue.
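// For instance, with the assumed values ReadyDelay == 3 and CurCycle == 10:
// an SU of height 14 is held back (14 > 10 + 3) unless it may reduce
// register pressure, while an SU of height 12 is queued as long as the
// hazard recognizer reports no hazard three cycles out.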
2177bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2178 static const unsigned ReadyDelay = 3;
2179
2180 if (SPQ->MayReduceRegPressure(SU)) return true;
2181
2182 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2183
2184 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2185 != ScheduleHazardRecognizer::NoHazard)
2186 return false;
2187
2188 return true;
2189}
2190
2191// Return true if right should be scheduled with higher priority than left.
2192bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
Evan Chengdebf9c52010-11-03 00:45:17 +00002193 if (left->isCall || right->isCall)
2194 // No way to compute latency of calls.
2195 return BURRSort(left, right, SPQ);
2196
Evan Chenge6d6c5d2010-07-26 21:49:07 +00002197 bool LHigh = SPQ->HighRegPressure(left);
2198 bool RHigh = SPQ->HighRegPressure(right);
Evan Cheng37b740c2010-07-24 00:39:05 +00002199 // Avoid causing spills. If register pressure is high, schedule for
2200 // register pressure reduction.
Andrew Trick2cd1f0b2011-01-20 06:21:59 +00002201 if (LHigh && !RHigh) {
2202 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2203 << right->NodeNum << ")\n");
Evan Cheng28590382010-07-21 23:53:58 +00002204 return true;
Andrew Trick2cd1f0b2011-01-20 06:21:59 +00002205 }
2206 else if (!LHigh && RHigh) {
2207 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2208 << left->NodeNum << ")\n");
Evan Cheng28590382010-07-21 23:53:58 +00002209 return false;
Andrew Trick2cd1f0b2011-01-20 06:21:59 +00002210 }
Evan Chenge6d6c5d2010-07-26 21:49:07 +00002211 else if (!LHigh && !RHigh) {
Andrew Trick9ccce772011-01-14 21:11:41 +00002212 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2213 if (result != 0)
2214 return result > 0;
Evan Chengcc2efe12010-05-28 23:26:21 +00002215 }
Evan Chengbdd062d2010-05-20 06:13:19 +00002216 return BURRSort(left, right, SPQ);
2217}
2218
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002219// Schedule as many instructions in each cycle as possible. So don't make an
2220// instruction available unless it is ready in the current cycle.
2221bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
Andrew Trick9ccce772011-01-14 21:11:41 +00002222 if (SU->getHeight() > CurCycle) return false;
2223
2224 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2225 != ScheduleHazardRecognizer::NoHazard)
2226 return false;
2227
Andrew Trickc88b7ec2011-03-04 02:03:45 +00002228 return true;
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002229}
2230
Andrew Trickb8390b72011-03-05 08:04:11 +00002231// list-ilp is currently an experimental scheduler that allows various
2232// heuristics to be enabled prior to the normal register reduction logic.
Andrew Trick9ccce772011-01-14 21:11:41 +00002233bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
Evan Chengdebf9c52010-11-03 00:45:17 +00002234 if (left->isCall || right->isCall)
2235 // No way to compute latency of calls.
2236 return BURRSort(left, right, SPQ);
2237
Andrew Trick641e2d42011-03-05 08:00:22 +00002238 unsigned LLiveUses, RLiveUses;
2239 int LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2240 int RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2241 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2242 DEBUG(++FactorCount[FactPressureDiff]);
2243 return LPDiff > RPDiff;
2244 }
2245
2246 if (!DisableSchedLiveUses && LLiveUses != RLiveUses) {
2247 DEBUG(dbgs() << "Live uses " << left->NodeNum << " = " << LLiveUses
2248 << " != " << right->NodeNum << " = " << RLiveUses << "\n");
2249 DEBUG(++FactorCount[FactRegUses]);
2250 return LLiveUses < RLiveUses;
2251 }
2252
2253 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2254 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2255 if (!DisableSchedStalls && LStall != RStall) {
2256 DEBUG(++FactorCount[FactHeight]);
2257 return left->getHeight() > right->getHeight();
2258 }
2259
Andrew Trick25cedf32011-03-05 10:29:25 +00002260 if (!DisableSchedCriticalPath) {
2261 int spread = (int)left->getDepth() - (int)right->getDepth();
2262 if (std::abs(spread) > MaxReorderWindow) {
2263 DEBUG(++FactorCount[FactDepth]);
2264 return left->getDepth() < right->getDepth();
2265 }
Andrew Trick641e2d42011-03-05 08:00:22 +00002266 }
2267
2268 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2269 DEBUG(++FactorCount[FactHeight]);
2270 return left->getHeight() > right->getHeight();
Evan Cheng37b740c2010-07-24 00:39:05 +00002271 }
2272
2273 return BURRSort(left, right, SPQ);
2274}
2275
Andrew Trick9ccce772011-01-14 21:11:41 +00002276//===----------------------------------------------------------------------===//
2277// Preschedule for Register Pressure
2278//===----------------------------------------------------------------------===//
2279
2280bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002281 if (SU->isTwoAddress) {
Dan Gohman1ddfcba2008-11-13 21:36:12 +00002282 unsigned Opc = SU->getNode()->getMachineOpcode();
Chris Lattner03ad8852008-01-07 07:27:27 +00002283 const TargetInstrDesc &TID = TII->get(Opc);
Chris Lattnerfd2e3382008-01-07 06:47:00 +00002284 unsigned NumRes = TID.getNumDefs();
Dan Gohman0340d1e2008-02-15 20:50:13 +00002285 unsigned NumOps = TID.getNumOperands() - NumRes;
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002286 for (unsigned i = 0; i != NumOps; ++i) {
Chris Lattnerfd2e3382008-01-07 06:47:00 +00002287 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
Dan Gohman1ddfcba2008-11-13 21:36:12 +00002288 SDNode *DU = SU->getNode()->getOperand(i).getNode();
Dan Gohman46520a22008-06-21 19:18:17 +00002289 if (DU->getNodeId() != -1 &&
2290 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002291 return true;
2292 }
2293 }
Evan Chengd38c22b2006-05-11 23:55:42 +00002294 }
Evan Chengd38c22b2006-05-11 23:55:42 +00002295 return false;
2296}
2297
Evan Chengf9891412007-12-20 09:25:31 +00002298/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
Dan Gohmanea045202008-06-21 22:05:24 +00002299/// physical register defs.
Dan Gohmane955c482008-08-05 14:45:15 +00002300static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
Evan Chengf9891412007-12-20 09:25:31 +00002301 const TargetInstrInfo *TII,
Dan Gohman3a4be0f2008-02-10 18:45:23 +00002302 const TargetRegisterInfo *TRI) {
Dan Gohman1ddfcba2008-11-13 21:36:12 +00002303 SDNode *N = SuccSU->getNode();
Dan Gohman17059682008-07-17 19:10:17 +00002304 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2305 const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
Dan Gohmanea045202008-06-21 22:05:24 +00002306 assert(ImpDefs && "Caller should check hasPhysRegDefs");
Dan Gohmana366da12009-03-23 16:23:01 +00002307 for (const SDNode *SUNode = SU->getNode(); SUNode;
Chris Lattner11a33812010-12-23 17:24:32 +00002308 SUNode = SUNode->getGluedNode()) {
Dan Gohmana366da12009-03-23 16:23:01 +00002309 if (!SUNode->isMachineOpcode())
Evan Chengf9891412007-12-20 09:25:31 +00002310 continue;
Dan Gohmana366da12009-03-23 16:23:01 +00002311 const unsigned *SUImpDefs =
2312 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2313 if (!SUImpDefs)
2314 return false;
2315 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
Owen Anderson53aa7a92009-08-10 22:56:29 +00002316 EVT VT = N->getValueType(i);
Chris Lattner3e5fbd72010-12-21 02:38:05 +00002317 if (VT == MVT::Glue || VT == MVT::Other)
Dan Gohmana366da12009-03-23 16:23:01 +00002318 continue;
2319 if (!N->hasAnyUseOfValue(i))
2320 continue;
2321 unsigned Reg = ImpDefs[i - NumDefs];
2322 for (;*SUImpDefs; ++SUImpDefs) {
2323 unsigned SUReg = *SUImpDefs;
2324 if (TRI->regsOverlap(Reg, SUReg))
2325 return true;
2326 }
Evan Chengf9891412007-12-20 09:25:31 +00002327 }
2328 }
2329 return false;
2330}
2331
Dan Gohman9a658d72009-03-24 00:49:12 +00002332/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2333/// are not handled well by the general register pressure reduction
2334/// heuristics. When presented with code like this:
2335///
2336/// N
2337/// / |
2338/// / |
2339/// U store
2340/// |
2341/// ...
2342///
2343/// the heuristics tend to push the store up, but since the
2344/// operand of the store has another use (U), this would lengthen
2345/// the live range feeding that other use (the U->N edge).
2346///
2347/// This function transforms code like the above to route U's
2348/// dependence through the store when possible, like this:
2349///
2350/// N
2351/// ||
2352/// ||
2353/// store
2354/// |
2355/// U
2356/// |
2357/// ...
2358///
2359/// This results in the store being scheduled immediately
2360/// after N, which shortens the U->N live range, reducing
2361/// register pressure.
2362///
Andrew Trick9ccce772011-01-14 21:11:41 +00002363void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
Dan Gohman9a658d72009-03-24 00:49:12 +00002364 // Visit all the nodes in topological order, working top-down.
2365 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2366 SUnit *SU = &(*SUnits)[i];
2367 // For now, only look at nodes with no data successors, such as stores.
2368 // These are especially important, due to the heuristics in
2369 // getNodePriority for nodes with no data successors.
2370 if (SU->NumSuccs != 0)
2371 continue;
2372 // For now, only look at nodes with exactly one data predecessor.
2373 if (SU->NumPreds != 1)
2374 continue;
2375 // Avoid prescheduling copies to virtual registers, which don't behave
2376 // like other nodes from the perspective of scheduling heuristics.
2377 if (SDNode *N = SU->getNode())
2378 if (N->getOpcode() == ISD::CopyToReg &&
2379 TargetRegisterInfo::isVirtualRegister
2380 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2381 continue;
2382
2383 // Locate the single data predecessor.
2384 SUnit *PredSU = 0;
2385 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2386 EE = SU->Preds.end(); II != EE; ++II)
2387 if (!II->isCtrl()) {
2388 PredSU = II->getSUnit();
2389 break;
2390 }
2391 assert(PredSU);
2392
2393 // Don't rewrite edges that carry physregs, because that requires additional
2394 // support infrastructure.
2395 if (PredSU->hasPhysRegDefs)
2396 continue;
2397 // Short-circuit the case where SU is PredSU's only data successor.
2398 if (PredSU->NumSuccs == 1)
2399 continue;
2400 // Avoid prescheduling to copies from virtual registers, which don't behave
Andrew Trickd0548ae2011-02-04 03:18:17 +00002401 // like other nodes from the perspective of scheduling heuristics.
Dan Gohman9a658d72009-03-24 00:49:12 +00002402 if (SDNode *N = SU->getNode())
2403 if (N->getOpcode() == ISD::CopyFromReg &&
2404 TargetRegisterInfo::isVirtualRegister
2405 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2406 continue;
2407
2408 // Perform checks on the successors of PredSU.
2409 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2410 EE = PredSU->Succs.end(); II != EE; ++II) {
2411 SUnit *PredSuccSU = II->getSUnit();
2412 if (PredSuccSU == SU) continue;
2413 // If PredSU has another successor with no data successors, for
2414 // now don't attempt to choose either over the other.
2415 if (PredSuccSU->NumSuccs == 0)
2416 goto outer_loop_continue;
2417 // Don't break physical register dependencies.
2418 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2419 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2420 goto outer_loop_continue;
2421 // Don't introduce graph cycles.
2422 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2423 goto outer_loop_continue;
2424 }
2425
2426 // Ok, the transformation is safe and the heuristics suggest it is
2427 // profitable. Update the graph.
Evan Chengbdd062d2010-05-20 06:13:19 +00002428 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2429 << " next to PredSU #" << PredSU->NodeNum
Chris Lattner4dc3edd2009-08-23 06:35:02 +00002430 << " to guide scheduling in the presence of multiple uses\n");
Dan Gohman9a658d72009-03-24 00:49:12 +00002431 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2432 SDep Edge = PredSU->Succs[i];
2433 assert(!Edge.isAssignedRegDep());
2434 SUnit *SuccSU = Edge.getSUnit();
2435 if (SuccSU != SU) {
2436 Edge.setSUnit(PredSU);
2437 scheduleDAG->RemovePred(SuccSU, Edge);
2438 scheduleDAG->AddPred(SU, Edge);
2439 Edge.setSUnit(SU);
2440 scheduleDAG->AddPred(SuccSU, Edge);
2441 --i;
2442 }
2443 }
2444 outer_loop_continue:;
2445 }
2446}
2447
Evan Chengd38c22b2006-05-11 23:55:42 +00002448/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2449/// it as a def&use operand, add a pseudo control edge from it to the other
2450/// node (if it won't create a cycle) so the two-address one will be scheduled
Evan Chenga5e595d2007-09-28 22:32:30 +00002451/// first (lower in the schedule). If both nodes are two-address, favor the
2452/// one that has a CopyToReg use (more likely to be a loop induction update).
2453/// If both are two-address, but one is commutable while the other is not
2454/// commutable, favor the one that's not commutable.
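///
/// For example (a sketch, assuming an x86-style add whose result is tied to
/// its first source operand):
///   t2 = ADD t0, t1    // two-address: t2 must reuse t0's register
///   t3 = SUB t0, t4    // another use of t0
/// The pseudo edge makes the bottom-up scheduler pick the ADD first, placing
/// it after the SUB in the final order; t0 is then read by the SUB before the
/// ADD overwrites it, so no extra copy of t0 is needed.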
Andrew Trick9ccce772011-01-14 21:11:41 +00002455void RegReductionPQBase::AddPseudoTwoAddrDeps() {
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002456 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
Dan Gohmane955c482008-08-05 14:45:15 +00002457 SUnit *SU = &(*SUnits)[i];
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002458 if (!SU->isTwoAddress)
2459 continue;
2460
Dan Gohman1ddfcba2008-11-13 21:36:12 +00002461 SDNode *Node = SU->getNode();
Chris Lattner11a33812010-12-23 17:24:32 +00002462 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002463 continue;
2464
Evan Cheng6c1414f2010-10-29 18:09:28 +00002465 bool isLiveOut = hasOnlyLiveOutUses(SU);
Dan Gohman17059682008-07-17 19:10:17 +00002466 unsigned Opc = Node->getMachineOpcode();
Chris Lattner03ad8852008-01-07 07:27:27 +00002467 const TargetInstrDesc &TID = TII->get(Opc);
Chris Lattnerfd2e3382008-01-07 06:47:00 +00002468 unsigned NumRes = TID.getNumDefs();
Dan Gohman0340d1e2008-02-15 20:50:13 +00002469 unsigned NumOps = TID.getNumOperands() - NumRes;
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002470 for (unsigned j = 0; j != NumOps; ++j) {
Dan Gohman82016c22008-11-19 02:00:32 +00002471 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
2472 continue;
2473 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2474 if (DU->getNodeId() == -1)
2475 continue;
2476 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2477 if (!DUSU) continue;
2478 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2479 E = DUSU->Succs.end(); I != E; ++I) {
Dan Gohman2d170892008-12-09 22:54:47 +00002480 if (I->isCtrl()) continue;
2481 SUnit *SuccSU = I->getSUnit();
Dan Gohman82016c22008-11-19 02:00:32 +00002482 if (SuccSU == SU)
Evan Cheng1bf166312007-11-09 01:27:11 +00002483 continue;
Dan Gohman82016c22008-11-19 02:00:32 +00002484 // Be conservative. Ignore if nodes aren't at roughly the same
2485 // height.
Dan Gohmandddc1ac2008-12-16 03:25:46 +00002486 if (SuccSU->getHeight() < SU->getHeight() &&
2487 (SU->getHeight() - SuccSU->getHeight()) > 1)
Dan Gohman82016c22008-11-19 02:00:32 +00002488 continue;
Dan Gohmaneefba6b2009-04-16 20:59:02 +00002489 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2490 // constrains whatever is using the copy, instead of the copy
2491 // itself. In the case that the copy is coalesced, this
2492 // preserves the intent of the pseudo two-address heuristics.
2493 while (SuccSU->Succs.size() == 1 &&
2494 SuccSU->getNode()->isMachineOpcode() &&
2495 SuccSU->getNode()->getMachineOpcode() ==
Chris Lattnerb06015a2010-02-09 19:54:29 +00002496 TargetOpcode::COPY_TO_REGCLASS)
Dan Gohmaneefba6b2009-04-16 20:59:02 +00002497 SuccSU = SuccSU->Succs.front().getSUnit();
2498 // Don't constrain non-instruction nodes.
Dan Gohman82016c22008-11-19 02:00:32 +00002499 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2500 continue;
2501 // Don't constrain nodes with physical register defs if the
2502 // predecessor can clobber them.
Dan Gohmanf3746cb2009-03-24 00:50:07 +00002503 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
Dan Gohman82016c22008-11-19 02:00:32 +00002504 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
Evan Cheng5924bf72007-09-25 01:54:36 +00002505 continue;
Dan Gohman82016c22008-11-19 02:00:32 +00002506 }
Dan Gohman3027bb62009-04-16 20:57:10 +00002507 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2508 // these may be coalesced away. We want them close to their uses.
Dan Gohman82016c22008-11-19 02:00:32 +00002509 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
Chris Lattnerb06015a2010-02-09 19:54:29 +00002510 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2511 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2512 SuccOpc == TargetOpcode::SUBREG_TO_REG)
Dan Gohman82016c22008-11-19 02:00:32 +00002513 continue;
2514 if ((!canClobber(SuccSU, DUSU) ||
Evan Cheng6c1414f2010-10-29 18:09:28 +00002515 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
Dan Gohman82016c22008-11-19 02:00:32 +00002516 (!SU->isCommutable && SuccSU->isCommutable)) &&
2517 !scheduleDAG->IsReachable(SuccSU, SU)) {
Evan Chengbdd062d2010-05-20 06:13:19 +00002518 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
Chris Lattner4dc3edd2009-08-23 06:35:02 +00002519 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
Dan Gohman79c35162009-01-06 01:19:04 +00002520 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
Dan Gohmanbf8e5202009-01-06 01:28:56 +00002521 /*Reg=*/0, /*isNormalMemory=*/false,
2522 /*isMustAlias=*/false,
Dan Gohman2d170892008-12-09 22:54:47 +00002523 /*isArtificial=*/true));
Evan Chengfd2c5dd2006-11-04 09:44:31 +00002524 }
2525 }
2526 }
2527 }
Evan Chengd38c22b2006-05-11 23:55:42 +00002528}
2529
Roman Levenstein30d09512008-03-27 09:44:37 +00002530/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
Roman Levensteinbc674502008-03-27 09:14:57 +00002531/// predecessors of the successors of the SUnit SU. Stop when the provided
2532/// limit is exceeded.
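/// The top-down sorter below calls this with a Limit of 1, so the return
/// value is only used to test whether the sum is exactly one.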
Andrew Trick2085a962010-12-21 22:25:04 +00002533static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
Roman Levensteinbc674502008-03-27 09:14:57 +00002534 unsigned Limit) {
2535 unsigned Sum = 0;
2536 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2537 I != E; ++I) {
Dan Gohman2d170892008-12-09 22:54:47 +00002538 const SUnit *SuccSU = I->getSUnit();
Roman Levensteinbc674502008-03-27 09:14:57 +00002539 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
2540 EE = SuccSU->Preds.end(); II != EE; ++II) {
Dan Gohman2d170892008-12-09 22:54:47 +00002541 SUnit *PredSU = II->getSUnit();
Evan Cheng16d72072008-03-29 18:34:22 +00002542 if (!PredSU->isScheduled)
2543 if (++Sum > Limit)
2544 return Sum;
Roman Levensteinbc674502008-03-27 09:14:57 +00002545 }
2546 }
2547 return Sum;
2548}
2549
Evan Chengd38c22b2006-05-11 23:55:42 +00002550
2551// Top down
2552bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
Evan Cheng6730f032007-01-08 23:55:53 +00002553 unsigned LPriority = SPQ->getNodePriority(left);
2554 unsigned RPriority = SPQ->getNodePriority(right);
Dan Gohman1ddfcba2008-11-13 21:36:12 +00002555 bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
2556 bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
Evan Chengd38c22b2006-05-11 23:55:42 +00002557 bool LIsFloater = LIsTarget && left->NumPreds == 0;
2558 bool RIsFloater = RIsTarget && right->NumPreds == 0;
Roman Levensteinbc674502008-03-27 09:14:57 +00002559 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
2560 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
Evan Chengd38c22b2006-05-11 23:55:42 +00002561
2562 if (left->NumSuccs == 0 && right->NumSuccs != 0)
2563 return false;
2564 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
2565 return true;
2566
Evan Chengd38c22b2006-05-11 23:55:42 +00002567 if (LIsFloater)
2568 LBonus -= 2;
2569 if (RIsFloater)
2570 RBonus -= 2;
2571 if (left->NumSuccs == 1)
2572 LBonus += 2;
2573 if (right->NumSuccs == 1)
2574 RBonus += 2;
2575
Evan Cheng73bdf042008-03-01 00:39:47 +00002576 if (LPriority+LBonus != RPriority+RBonus)
2577 return LPriority+LBonus < RPriority+RBonus;
Anton Korobeynikov035eaac2008-02-20 11:10:28 +00002578
Dan Gohmandddc1ac2008-12-16 03:25:46 +00002579 if (left->getDepth() != right->getDepth())
2580 return left->getDepth() < right->getDepth();
Evan Cheng73bdf042008-03-01 00:39:47 +00002581
2582 if (left->NumSuccsLeft != right->NumSuccsLeft)
2583 return left->NumSuccsLeft > right->NumSuccsLeft;
2584
Andrew Trick2085a962010-12-21 22:25:04 +00002585 assert(left->NodeQueueId && right->NodeQueueId &&
Roman Levenstein6b371142008-04-29 09:07:59 +00002586 "NodeQueueId cannot be zero");
2587 return (left->NodeQueueId > right->NodeQueueId);
Evan Chengd38c22b2006-05-11 23:55:42 +00002588}
2589
Evan Chengd38c22b2006-05-11 23:55:42 +00002590//===----------------------------------------------------------------------===//
2591// Public Constructor Functions
2592//===----------------------------------------------------------------------===//
2593
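// Each factory below pairs a priority-queue heuristic with a
// ScheduleDAGRRList instance and points the queue back at the scheduler via
// setScheduleDAG().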
Dan Gohmandfaf6462009-02-11 04:27:20 +00002594llvm::ScheduleDAGSDNodes *
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002595llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2596 CodeGenOpt::Level OptLevel) {
Dan Gohman619ef482009-01-15 19:20:50 +00002597 const TargetMachine &TM = IS->TM;
2598 const TargetInstrInfo *TII = TM.getInstrInfo();
2599 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
Andrew Trick2085a962010-12-21 22:25:04 +00002600
Evan Chenga77f3d32010-07-21 06:09:07 +00002601 BURegReductionPriorityQueue *PQ =
Evan Chengbf32e542010-07-22 06:24:48 +00002602 new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002603 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
Evan Cheng7e4abde2008-07-02 09:23:51 +00002604 PQ->setScheduleDAG(SD);
Andrew Trick2085a962010-12-21 22:25:04 +00002605 return SD;
Evan Chengd38c22b2006-05-11 23:55:42 +00002606}
2607
Dan Gohmandfaf6462009-02-11 04:27:20 +00002608llvm::ScheduleDAGSDNodes *
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002609llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
2610 CodeGenOpt::Level OptLevel) {
Dan Gohman619ef482009-01-15 19:20:50 +00002611 const TargetMachine &TM = IS->TM;
2612 const TargetInstrInfo *TII = TM.getInstrInfo();
2613 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
Andrew Trick2085a962010-12-21 22:25:04 +00002614
Evan Chenga77f3d32010-07-21 06:09:07 +00002615 TDRegReductionPriorityQueue *PQ =
2616 new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002617 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
Dan Gohman3f656df2008-11-20 02:45:51 +00002618 PQ->setScheduleDAG(SD);
2619 return SD;
Evan Chengd38c22b2006-05-11 23:55:42 +00002620}
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002621
2622llvm::ScheduleDAGSDNodes *
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002623llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2624 CodeGenOpt::Level OptLevel) {
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002625 const TargetMachine &TM = IS->TM;
2626 const TargetInstrInfo *TII = TM.getInstrInfo();
2627 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
Andrew Trick2085a962010-12-21 22:25:04 +00002628
Evan Chenga77f3d32010-07-21 06:09:07 +00002629 SrcRegReductionPriorityQueue *PQ =
Evan Chengbf32e542010-07-22 06:24:48 +00002630 new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002631 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
Evan Chengbdd062d2010-05-20 06:13:19 +00002632 PQ->setScheduleDAG(SD);
Andrew Trick2085a962010-12-21 22:25:04 +00002633 return SD;
Evan Chengbdd062d2010-05-20 06:13:19 +00002634}
2635
2636llvm::ScheduleDAGSDNodes *
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002637llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
2638 CodeGenOpt::Level OptLevel) {
Evan Chengbdd062d2010-05-20 06:13:19 +00002639 const TargetMachine &TM = IS->TM;
2640 const TargetInstrInfo *TII = TM.getInstrInfo();
2641 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
Evan Chenga77f3d32010-07-21 06:09:07 +00002642 const TargetLowering *TLI = &IS->getTargetLowering();
Andrew Trick2085a962010-12-21 22:25:04 +00002643
Evan Chenga77f3d32010-07-21 06:09:07 +00002644 HybridBURRPriorityQueue *PQ =
Evan Chengdf907f42010-07-23 22:39:59 +00002645 new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002646
2647 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002648 PQ->setScheduleDAG(SD);
Andrew Trick2085a962010-12-21 22:25:04 +00002649 return SD;
Bill Wendling8cbc25d2010-01-23 10:26:57 +00002650}
Evan Cheng37b740c2010-07-24 00:39:05 +00002651
2652llvm::ScheduleDAGSDNodes *
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002653llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
2654 CodeGenOpt::Level OptLevel) {
Evan Cheng37b740c2010-07-24 00:39:05 +00002655 const TargetMachine &TM = IS->TM;
2656 const TargetInstrInfo *TII = TM.getInstrInfo();
2657 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2658 const TargetLowering *TLI = &IS->getTargetLowering();
Andrew Trick2085a962010-12-21 22:25:04 +00002659
Evan Cheng37b740c2010-07-24 00:39:05 +00002660 ILPBURRPriorityQueue *PQ =
2661 new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
Andrew Trick10ffc2b2010-12-24 05:03:26 +00002662 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
Evan Cheng37b740c2010-07-24 00:39:05 +00002663 PQ->setScheduleDAG(SD);
Andrew Trick2085a962010-12-21 22:25:04 +00002664 return SD;
Evan Cheng37b740c2010-07-24 00:39:05 +00002665}