//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
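
// A minimal sketch of the list-scheduling loop described above (illustrative
// pseudocode only; the real implementation below also manages the pending
// queue, the hazard recognizer, and noop insertion):
//
//   CurCycle = 0;
//   Available = all nodes with no predecessors;
//   while (!Available.empty()) {
//     SU = Available.pop();                // highest priority first
//     if (no hazard for SU at CurCycle) {
//       emit SU; release successors whose operands are now ready;
//     } else {
//       ++CurCycle;                        // stall (or emit a noop)
//     }
//   }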

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h" // for the cl::opt flags below
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("critical"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

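// As an example, the flags above could be exercised from an llc run such as
// the following (illustrative command line only; the options are cl::Hidden
// but are still accepted when spelled out explicitly):
//
//   llc -post-RA-scheduler -break-anti-dependencies=all \
//       -postra-sched-debugdiv=16 -postra-sched-debugmod=3 foo.bc
//
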
namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// RegisterReference - Information about a register reference
    /// within a live range.
    typedef struct {
      /// Operand - The register's operand.
      MachineOperand *Operand;
      /// RC - The register class.
      const TargetRegisterClass *RC;
    } RegisterReference;

    /// AvailableQueue - The priority queue to use for the available SUnits.
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// GroupNodes - Implements a disjoint-union data structure to
    /// form register groups. A node is represented by an index into
    /// the vector. A node can "point to" itself to indicate that it
    /// is the parent of a group, or point to another node to indicate
    /// that it is a member of the same group as that node.
    std::vector<unsigned> GroupNodes;

    /// GroupNodeIndices - For each register, the index of the GroupNode
    /// currently representing the group that the register belongs to.
    /// Register 0 is always represented by the 0 group, a group
    /// composed of registers that are not eligible for anti-dependency
    /// breaking.
    unsigned GroupNodeIndices[TargetRegisterInfo::FirstVirtualRegister];
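
    // Illustrative trace of the grouping machinery (hypothetical register
    // numbers):
    //   GroupNodeIndices[R] == R initially, so each register starts as the
    //   parent of its own singleton group;
    //   UnionGroups(R, 0) merges R's group into group 0, marking R as
    //   ineligible for renaming;
    //   LeaveGroup(R) allocates a fresh GroupNode, making R a singleton again.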

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, RegisterReference> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding
    /// bottom-up), or ~0u if no kill of the register has been
    /// seen. The register is live if this index != ~0u and
    /// DefIndices[Reg] == ~0u.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AA(aa),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        GroupNodes(TargetRegisterInfo::FirstVirtualRegister, 0) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    /// IsLive - Return true if Reg is live.
    bool IsLive(unsigned Reg);

    void PrescanInstruction(MachineInstr *MI, unsigned Count);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    bool BreakAntiDependencies(bool CriticalPathOnly);
    unsigned FindSuitableFreeRegister(unsigned AntiDepReg);

    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    // GetGroup - Get the group for a register. The returned value is
    // the index of the GroupNode representing the group.
    unsigned GetGroup(unsigned Reg);

    // GetGroupRegs - Return a vector of the registers belonging to a
    // group.
    void GetGroupRegs(unsigned Group, std::vector<unsigned> &Regs);

    // UnionGroups - Union Reg1's and Reg2's groups to form a new
    // group. Return the index of the GroupNode representing the
    // group.
    unsigned UnionGroups(unsigned Reg1, unsigned Reg2);

    // LeaveGroup - Remove a register from its current group and place
    // it alone in its own group. Return the index of the GroupNode
    // representing the register's new group.
    unsigned LeaveGroup(unsigned Reg);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
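
// For example (illustrative x86 sequence), a push implicitly modifies %esp,
// so it is a scheduling boundary; the instructions above and below it are
// scheduled as independent regions:
//
//   ...           <- region 1
//   pushl %eax    <- modifies %esp: boundary
//   ...           <- region 2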

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel))
      return false;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

unsigned SchedulePostRATDList::GetGroup(unsigned Reg)
{
  unsigned Node = GroupNodeIndices[Reg];
  while (GroupNodes[Node] != Node)
    Node = GroupNodes[Node];

  return Node;
}

void SchedulePostRATDList::GetGroupRegs(unsigned Group,
                                        std::vector<unsigned> &Regs)
{
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg) {
    if (GetGroup(Reg) == Group)
      Regs.push_back(Reg);
  }
}

unsigned SchedulePostRATDList::UnionGroups(unsigned Reg1, unsigned Reg2)
{
  assert(GroupNodes[0] == 0 && "GroupNode 0 not parent!");
  assert(GroupNodeIndices[0] == 0 && "Reg 0 not in Group 0!");

  // Find the group for each register.
  unsigned Group1 = GetGroup(Reg1);
  unsigned Group2 = GetGroup(Reg2);

  // If either group is 0, then that must become the parent.
  unsigned Parent = (Group1 == 0) ? Group1 : Group2;
  unsigned Other = (Parent == Group1) ? Group2 : Group1;
  GroupNodes.at(Other) = Parent;
  return Parent;
}

unsigned SchedulePostRATDList::LeaveGroup(unsigned Reg)
{
  // Create a new GroupNode for Reg. Reg's existing GroupNode must
  // stay as is because there could be other GroupNodes referring to
  // it.
  unsigned idx = GroupNodes.size();
  GroupNodes.push_back(idx);
  GroupNodeIndices[Reg] = idx;
  return idx;
}

bool SchedulePostRATDList::IsLive(unsigned Reg)
{
  // KillIndex must be defined and DefIndex not defined for a register
  // to be live.
  return (KillIndices[Reg] != ~0u) && (DefIndices[Reg] == ~0u);
}

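// A short bottom-up trace (hypothetical instruction indices) of how the
// liveness arrays evolve; IsLive(R) holds only between a use of R and the
// def below it:
//   index 7:  ... = use R  -> KillIndices[R] = 7, DefIndices[R] = ~0u (live)
//   index 3:  R = def ...  -> DefIndices[R] = 3                   (not live)
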
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Initialize all registers to be in their own group. Initially we
  // assign the register to the same-indexed GroupNode.
  for (unsigned i = 0; i < TargetRegisterInfo::FirstVirtualRegister; ++i)
    GroupNodeIndices[i] = i;

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      UnionGroups(Reg, 0);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        UnionGroups(AliasReg, 0);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        UnionGroups(Reg, 0);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          UnionGroups(AliasReg, 0);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In a non-return block this is any
  // callee-saved register that is not saved in the prologue.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    UnionGroups(Reg, 0);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      UnionGroups(AliasReg, 0);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (EnableAntiDepBreaking != "none") {
    if (BreakAntiDependencies((EnableAntiDepBreaking == "all") ? false : true)) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  DEBUG(errs() << "Observe: ");
  DEBUG(MI->dump());

  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg) {
    // If Reg is currently live, then mark that it can't be renamed as
    // we don't know the extent of its live-range anymore (now that it
    // has been scheduled). If it is not live but was defined in the
    // previous schedule region, then set its def index to the most
    // conservative location (i.e. the beginning of the previous
    // schedule region).
    if (IsLive(Reg)) {
      DEBUG(if (GetGroup(Reg) != 0)
              errs() << " " << TRI->getName(Reg) << "=g" <<
                GetGroup(Reg) << "->g0(region live-out)");
      UnionGroups(Reg, 0);
    } else if ((DefIndices[Reg] < InsertPosIndex) && (DefIndices[Reg] >= Count)) {
      DefIndices[Reg] = Count;
    }
  }

  PrescanInstruction(MI, Count);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

/// AntiDepPathStep - Return an anti-dependence predecessor edge of SU, or
/// null if SU has none.
static SDep *AntiDepPathStep(SUnit *SU) {
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    if (P->getKind() == SDep::Anti) {
      return &*P;
    }
  }
  return 0;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI, unsigned Count) {
  // Scan the register defs for this instruction and update
  // live-ranges, groups and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    // Ignore two-addr defs for liveness...
    if (MI->isRegTiedToUseOperand(i)) continue;

    // Update Def for Reg and subregs.
    DefIndices[Reg] = Count;
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
    }
  }

  DEBUG(errs() << "\tDef Groups:");
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    DEBUG(errs() << " " << TRI->getName(Reg) << "=g" << GetGroup(Reg));

    // If MI's defs have a special allocation requirement, don't allow
    // any def registers to be changed. Also assume all registers
    // defined in a call must not be changed (ABI).
    if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq()) {
      DEBUG(if (GetGroup(Reg) != 0) errs() << "->g0(alloc-req)");
      UnionGroups(Reg, 0);
    }

    // Any subregisters that are live at this point are defined here,
    // so group those subregisters with Reg.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      if (IsLive(SubregReg)) {
        UnionGroups(Reg, SubregReg);
        DEBUG(errs() << "->g" << GetGroup(Reg) << "(via " <<
              TRI->getName(SubregReg) << ")");
      }
    }

    // Note register reference...
    const TargetRegisterClass *RC = NULL;
    if (i < MI->getDesc().getNumOperands())
      RC = MI->getDesc().OpInfo[i].getRegClass(TRI);
    RegisterReference RR = { &MO, RC };
    RegRefs.insert(std::make_pair(Reg, RR));
  }

  DEBUG(errs() << '\n');
}
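
// For example (hypothetical x86 registers): if AX is live when an instruction
// defines EAX, the loop above unions EAX and AX into one group, so any rename
// later chosen for EAX is forced to cover the overlapping AX references too.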

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  DEBUG(errs() << "\tUse Groups:");

  // Scan the register uses for this instruction and update
  // live-ranges, groups and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    DEBUG(errs() << " " << TRI->getName(Reg) << "=g" << GetGroup(Reg));

    // If the register wasn't previously live here, this is a kill. Forget
    // the previous live-range information and start a new live-range
    // for the register.
    if (!IsLive(Reg)) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      RegRefs.erase(Reg);
      LeaveGroup(Reg);
      DEBUG(errs() << "->g" << GetGroup(Reg) << "(last-use)");
    }
    // Repeat, for subregisters.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      if (!IsLive(SubregReg)) {
        KillIndices[SubregReg] = Count;
        DefIndices[SubregReg] = ~0u;
        RegRefs.erase(SubregReg);
        LeaveGroup(SubregReg);
        DEBUG(errs() << "->g" << GetGroup(SubregReg) << "(last-use)");
      }
    }

    // If MI's uses have a special allocation requirement, don't allow
    // any use registers to be changed. Also assume all registers
    // used in a call must not be changed (ABI).
    if (MI->getDesc().isCall() || MI->getDesc().hasExtraSrcRegAllocReq()) {
      DEBUG(if (GetGroup(Reg) != 0) errs() << "->g0(alloc-req)");
      UnionGroups(Reg, 0);
    }

    // Note register reference...
    const TargetRegisterClass *RC = NULL;
    if (i < MI->getDesc().getNumOperands())
      RC = MI->getDesc().OpInfo[i].getRegClass(TRI);
    RegisterReference RR = { &MO, RC };
    RegRefs.insert(std::make_pair(Reg, RR));
  }

  DEBUG(errs() << '\n');

  // Form a group of all defs and uses of a KILL instruction to ensure
  // that all registers are renamed as a group.
  if (MI->getOpcode() == TargetInstrInfo::KILL) {
    unsigned FirstReg = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (FirstReg != 0)
        UnionGroups(FirstReg, Reg);
      FirstReg = Reg;
    }

    DEBUG(if (FirstReg != 0) errs() << "\tKill Group: g" <<
            GetGroup(FirstReg) << '\n');
  }
}

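/// FindSuitableFreeRegister - Find a register that belongs to every register
/// class required by AntiDepReg's references and that is free over the whole
/// live range being renamed, or return 0 if no such register exists.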
unsigned SchedulePostRATDList::FindSuitableFreeRegister(unsigned AntiDepReg) {
  // Collect all registers in the same group as AntiDepReg. These all
  // need to be renamed together if we are to break the
  // anti-dependence.
  std::vector<unsigned> Regs;
  GetGroupRegs(GetGroup(AntiDepReg), Regs);

  DEBUG(errs() << "\tRename Register Group:");
  DEBUG(for (unsigned i = 0, e = Regs.size(); i != e; ++i)
          DEBUG(errs() << " " << TRI->getName(Regs[i])));
  DEBUG(errs() << "\n");

  // If there is a single register that needs to be renamed then we
  // can do it ourselves.
  if (Regs.size() == 1) {
    assert(Regs[0] == AntiDepReg && "Register group does not contain register!");

    // Check all references that need rewriting. Gather up all the
    // register classes for the register references.
    const TargetRegisterClass *FirstRC = NULL;
    std::set<const TargetRegisterClass *> RCs;
    std::pair<std::multimap<unsigned, RegisterReference>::iterator,
              std::multimap<unsigned, RegisterReference>::iterator>
      Range = RegRefs.equal_range(AntiDepReg);
    for (std::multimap<unsigned, RegisterReference>::iterator
           Q = Range.first, QE = Range.second; Q != QE; ++Q) {
      const TargetRegisterClass *RC = Q->second.RC;
      if (RC == NULL) continue;
      if (FirstRC == NULL)
        FirstRC = RC;
      else if (FirstRC != RC)
        RCs.insert(RC);
    }

    if (FirstRC == NULL)
      return 0;

    DEBUG(errs() << "\tChecking Regclasses: " << FirstRC->getName());
    DEBUG(for (std::set<const TargetRegisterClass *>::iterator S =
                 RCs.begin(), E = RCs.end(); S != E; ++S)
            errs() << " " << (*S)->getName());
    DEBUG(errs() << '\n');

    // Using the allocation order for one of the register classes,
    // find the first register that belongs to all the register
    // classes and is available over the live range of the register.
    DEBUG(errs() << "\tFind Register:");
    for (TargetRegisterClass::iterator R = FirstRC->allocation_order_begin(MF),
         RE = FirstRC->allocation_order_end(MF); R != RE; ++R) {
      unsigned NewReg = *R;

      // Don't replace a register with itself.
      if (NewReg == AntiDepReg) continue;

      DEBUG(errs() << " " << TRI->getName(NewReg));

      // Make sure NewReg is in all required register classes.
      for (std::set<const TargetRegisterClass *>::iterator S =
             RCs.begin(), E = RCs.end(); S != E; ++S) {
        const TargetRegisterClass *RC = *S;
        if (!RC->contains(NewReg)) {
          DEBUG(errs() << "(not in " << RC->getName() << ")");
          NewReg = 0;
          break;
        }
      }

      // If NewReg is dead and NewReg's most recent def is not before
      // AntiDepReg's kill, it's safe to replace AntiDepReg with
      // NewReg. We must also check all subregisters of NewReg.
      if (IsLive(NewReg) || (KillIndices[AntiDepReg] > DefIndices[NewReg])) {
        DEBUG(errs() << "(live)");
        continue;
      }
      {
        bool found = false;
        for (const unsigned *Subreg = TRI->getSubRegisters(NewReg);
             *Subreg; ++Subreg) {
          unsigned SubregReg = *Subreg;
          if (IsLive(SubregReg) || (KillIndices[AntiDepReg] > DefIndices[SubregReg])) {
            DEBUG(errs() << "(subreg " << TRI->getName(SubregReg) << " live)");
            found = true;
          }
        }
        if (found)
          continue;
      }

      if (NewReg != 0) {
        DEBUG(errs() << '\n');
        return NewReg;
      }
    }

    DEBUG(errs() << '\n');
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
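/// For example (hypothetical registers), renaming the second definition of
/// %R1 removes the anti-dependence of that def on the earlier use, allowing
/// the scheduler to reorder the two computations:
///
///   %R1 = op A         %R1 = op A
///   B   = use %R1      B   = use %R1
///   %R1 = op C    =>   %R2 = op C
///   D   = use %R1      D   = use %R2
///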
bool SchedulePostRATDList::BreakAntiDependencies(bool CriticalPathOnly) {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // If breaking anti-dependencies only along the critical path, track
  // progress along the critical path through the SUnit graph as we
  // walk the instructions.
  SUnit *CriticalPathSU = 0;
  MachineInstr *CriticalPathMI = 0;

  // If breaking all anti-dependencies, we need a map from MI to SUnit.
  std::map<MachineInstr *, SUnit *> MISUnitMap;

  // Find the node at the bottom of the critical path.
  if (CriticalPathOnly) {
    SUnit *Max = 0;
    for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
      SUnit *SU = &SUnits[i];
      if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
        Max = SU;
    }

    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    CriticalPathSU = Max;
    CriticalPathMI = CriticalPathSU->getInstr();
  } else {
    for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
      SUnit *SU = &SUnits[i];
      MISUnitMap.insert(std::pair<MachineInstr *, SUnit *>(SU->getInstr(), SU));
    }
    DEBUG(errs() << "Breaking all anti-dependencies\n");
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (!IsLive(Reg))
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
  std::string dbgStr;
#endif

  // Attempt to break anti-dependence edges. Walk the instructions
  // from the bottom up, tracking information about liveness as we go
  // to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    DEBUG(errs() << "Anti: ");
    DEBUG(MI->dump());

    // Process the defs in MI...
    PrescanInstruction(MI, Count);

    // Check if this instruction has an anti-dependence that we may be
    // able to break. If so, set AntiDepReg to the non-zero
    // register associated with the anti-dependence.
    //
    unsigned AntiDepReg = 0;

    // Limiting our attention to the critical path is a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // We can also break all anti-dependencies because they can
    // occur along the non-critical path but are still detrimental for
    // scheduling.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    if (!CriticalPathOnly || (MI == CriticalPathMI)) {
      DEBUG(dbgStr.clear());

      SUnit *PathSU;
      SDep *Edge;
      if (CriticalPathOnly) {
        PathSU = CriticalPathSU;
        Edge = CriticalPathStep(PathSU);
      } else {
        PathSU = MISUnitMap[MI];
        Edge = (PathSU) ? AntiDepPathStep(PathSU) : 0;
      }

      if (Edge) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges, and ignore KILL
        // instructions (they form a group in ScanInstruction but
        // don't cause any anti-dependence breaking themselves).
        if ((Edge->getKind() == SDep::Anti) &&
            (MI->getOpcode() != TargetInstrInfo::KILL)) {
          AntiDepReg = Edge->getReg();
          DEBUG(dbgStr += "\tAntidep reg: ");
          DEBUG(dbgStr += TRI->getName(AntiDepReg));
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg)) {
            // Don't break anti-dependencies on non-allocatable registers.
            DEBUG(dbgStr += " (non-allocatable)");
            AntiDepReg = 0;
          } else {
            int OpIdx = MI->findRegisterDefOperandIdx(AntiDepReg);
            assert(OpIdx != -1 && "Can't find index for defined register operand");
            if (MI->isRegTiedToUseOperand(OpIdx)) {
              // If the anti-dep register is tied to a use, then don't try to
              // change it. It will be changed along with the use if required
              // to break an earlier antidep.
              DEBUG(dbgStr += " (tied-to-use)");
              AntiDepReg = 0;
            } else {
              // If the SUnit has other dependencies on the SUnit that
              // it anti-depends on, don't bother breaking the
              // anti-dependency since those edges would prevent such
              // units from being scheduled past each other
              // regardless.
              //
              // Also, if there are dependencies on other SUnits with
              // the same register as the anti-dependency, don't
              // attempt to break it.
              for (SUnit::pred_iterator P = PathSU->Preds.begin(),
                     PE = PathSU->Preds.end(); P != PE; ++P) {
                if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                  DEBUG(dbgStr += " (real dependency)");
                  AntiDepReg = 0;
                  break;
                }
              }
            }
          }
        }

        if (CriticalPathOnly) {
          CriticalPathSU = NextSU;
          CriticalPathMI = CriticalPathSU->getInstr();
        }
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    // Determine AntiDepReg's register group.
    const unsigned GroupIndex = AntiDepReg != 0 ? GetGroup(AntiDepReg) : 0;
    if (GroupIndex == 0) {
      DEBUG(if (AntiDepReg != 0) dbgStr += " (zero group)");
      AntiDepReg = 0;
    }

    DEBUG(if (!dbgStr.empty()) errs() << dbgStr << '\n');

    // Look for a suitable register to use to break the anti-dependence.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = FindSuitableFreeRegister(AntiDepReg)) {
        DEBUG(errs() << "\tBreaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, RegisterReference>::iterator,
                  std::multimap<unsigned, RegisterReference>::iterator>
          Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, RegisterReference>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second.Operand->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        // FIXME forall in group
        UnionGroups(NewReg, 0);
        RegRefs.erase(NewReg);
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];

        // FIXME forall in group
        UnionGroups(AntiDepReg, 0);
        RegRefs.erase(AntiDepReg);
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        Changed = true;
        ++NumFixedAnti;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags, which may have been made
/// incorrect by instruction reordering.
///
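/// For example (hypothetical instructions): if scheduling moves the use that
/// carried the kill flag above another use of the same register, the stale
/// flag must migrate to the new last use:
///
///   B = use %R1             A = use %R1<kill>   <- stale flag
///   A = use %R1<kill>  =>   B = use %R1         <- FixupKills moves the
///                                                  kill flag here
///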
1102void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
1103 DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');
1104
1105 std::set<unsigned> killedRegs;
1106 BitVector ReservedRegs = TRI->getReservedRegs(MF);
David Goodwin5e411782009-09-03 22:15:25 +00001107
1108 StartBlockForKills(MBB);
David Goodwin7886cd82009-08-29 00:11:13 +00001109
1110 // Examine block from end to start...
David Goodwin88a589c2009-08-25 17:03:05 +00001111 unsigned Count = MBB->size();
1112 for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
1113 I != E; --Count) {
1114 MachineInstr *MI = --I;
1115
David Goodwin7886cd82009-08-29 00:11:13 +00001116 // Update liveness. Registers that are defed but not used in this
1117 // instruction are now dead. Mark register and all subregs as they
1118 // are completely defined.
1119 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1120 MachineOperand &MO = MI->getOperand(i);
1121 if (!MO.isReg()) continue;
1122 unsigned Reg = MO.getReg();
1123 if (Reg == 0) continue;
1124 if (!MO.isDef()) continue;
1125 // Ignore two-addr defs.
1126 if (MI->isRegTiedToUseOperand(i)) continue;
1127
David Goodwin7886cd82009-08-29 00:11:13 +00001128 KillIndices[Reg] = ~0u;
1129
1130 // Repeat for all subregs.
1131 for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
1132 *Subreg; ++Subreg) {
1133 KillIndices[*Subreg] = ~0u;
1134 }
1135 }
David Goodwin88a589c2009-08-25 17:03:05 +00001136
David Goodwin8f909342009-09-23 16:35:25 +00001137 // Examine all used registers and set/clear kill flag. When a
1138 // register is used multiple times we only set the kill flag on
1139 // the first use.
David Goodwin88a589c2009-08-25 17:03:05 +00001140 killedRegs.clear();
1141 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1142 MachineOperand &MO = MI->getOperand(i);
1143 if (!MO.isReg() || !MO.isUse()) continue;
1144 unsigned Reg = MO.getReg();
1145 if ((Reg == 0) || ReservedRegs.test(Reg)) continue;
1146
David Goodwin7886cd82009-08-29 00:11:13 +00001147 bool kill = false;
1148 if (killedRegs.find(Reg) == killedRegs.end()) {
1149 kill = true;
1150 // A register is not killed if any subregs are live...
1151 for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
1152 *Subreg; ++Subreg) {
1153 if (KillIndices[*Subreg] != ~0u) {
1154 kill = false;
1155 break;
1156 }
1157 }
1158
1159 // If subreg is not live, then register is killed if it became
1160 // live in this instruction
1161 if (kill)
1162 kill = (KillIndices[Reg] == ~0u);
1163 }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any register used by this instruction (unless the use is
    // <undef>), along with all of its subregisters, as now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

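  // ReleaseSuccessors may push newly-ready successors onto PendingQueue
  // (see ReleaseSucc above); the priority queue is notified last so it
  // can account for the node that was just scheduled.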
  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // A node is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back; otherwise schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }
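    // Note: the assignment from back() plus pop_back() above removes
    // element i in constant time without preserving order; "--i; --e;"
    // makes the loop rescan the element that was swapped into slot i.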

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }
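    // The loop above pops nodes in priority order and stops at the
    // first hazard-free one; everything it skipped is stashed in
    // NotReady and requeued below so it can compete again on the next
    // pass.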

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazard recognizer, don't
      // advance the cycle time just because we scheduled a node. If
      // the target allows it, we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem;
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue, and pending
        // instructions will fault unless we emit a noop. This is the
        // case for processors without pipeline interlocks and in other
        // cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

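      // Whether the cycle completed normally, stalled, or required a
      // noop, the machine model now moves on to the next cycle.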
      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}
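
// A minimal usage sketch (illustrative only; the pass-manager setup
// below is an assumption, not part of this file):
//
//   PM.add(llvm::createPostRAScheduler(CodeGenOpt::Default));
//
// Whether the pass actually does any scheduling is normally gated by
// the target's TargetSubtarget::enablePostRAScheduler hook.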