//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
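
// A rough sketch of that loop (illustrative only; the real implementation is
// ListScheduleTopDown below, which also manages hazards, stalls, and noop
// insertion):
//
//   while AvailableQueue or PendingQueue is non-empty:
//     move PendingQueue entries whose latency has elapsed to AvailableQueue
//     if an available node can issue without a hazard: schedule it
//     else: advance the cycle, counting a stall or emitting a noop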

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("critical"), cl::Hidden);
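
// For example, the two options above can be combined as follows
// (hypothetical invocation; both options are cl::Hidden):
//   llc -post-RA-scheduler -break-anti-dependencies=all input.bc
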
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// RegisterReference - Information about a register reference
    /// within a live range.
    struct RegisterReference {
      /// Operand - The register's operand.
      MachineOperand *Operand;
      /// RC - The register class.
      const TargetRegisterClass *RC;
    };

    /// AvailableQueue - The priority queue to use for the available SUnits.
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// GroupNodes - Implements a disjoint-union data structure to
    /// form register groups. A node is represented by an index into
    /// the vector. A node can "point to" itself to indicate that it
    /// is the parent of a group, or point to another node to indicate
    /// that it is a member of the same group as that node.
    std::vector<unsigned> GroupNodes;

    /// GroupNodeIndices - For each register, the index of the GroupNode
    /// currently representing the group that the register belongs to.
    /// Register 0 is always represented by the 0 group, a group
    /// composed of registers that are not eligible for anti-dependency
    /// breaking.
    unsigned GroupNodeIndices[TargetRegisterInfo::FirstVirtualRegister];
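    // (For example, after UnionGroups(R1, R2) both registers resolve to the
    // same index via GetGroup, and a later LeaveGroup(R1) gives R1 a fresh
    // singleton group while leaving R2's group intact; R1 and R2 here are
    // arbitrary physical registers.)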

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, RegisterReference> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding
    /// bottom-up), or ~0u if no kill of the register has been
    /// seen. The register is live if this index != ~0u and DefIndices
    /// == ~0u.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AA(aa),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        GroupNodes(TargetRegisterInfo::FirstVirtualRegister, 0) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    /// IsLive - Return true if Reg is live.
    bool IsLive(unsigned Reg);

    void PrescanInstruction(MachineInstr *MI, unsigned Count);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    bool BreakAntiDependencies(bool CriticalPathOnly);
    unsigned FindSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg);

    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();

    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);

    // GetGroup - Get the group for a register. The returned value is
    // the index of the GroupNode representing the group.
    unsigned GetGroup(unsigned Reg);

    // GetGroupRegs - Return a vector of the registers belonging to a
    // group.
    void GetGroupRegs(unsigned Group, std::vector<unsigned> &Regs);

    // UnionGroups - Union Reg1's and Reg2's groups to form a new
    // group. Return the index of the GroupNode representing the
    // group.
    unsigned UnionGroups(unsigned Reg1, unsigned Reg2);

    // LeaveGroup - Remove a register from its current group and place
    // it alone in its own group. Return the index of the GroupNode
    // representing the register's new group.
    unsigned LeaveGroup(unsigned Reg);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;
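  // (Illustrative: on x86 the check above fires for pushes, pops, and
  // calls, all of which implicitly modify ESP/RSP.)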

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel))
      return false;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

unsigned SchedulePostRATDList::GetGroup(unsigned Reg)
{
  unsigned Node = GroupNodeIndices[Reg];
  while (GroupNodes[Node] != Node)
    Node = GroupNodes[Node];

  return Node;
}

void SchedulePostRATDList::GetGroupRegs(unsigned Group, std::vector<unsigned> &Regs)
{
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg) {
    if (GetGroup(Reg) == Group)
      Regs.push_back(Reg);
  }
}

unsigned SchedulePostRATDList::UnionGroups(unsigned Reg1, unsigned Reg2)
{
  assert(GroupNodes[0] == 0 && "GroupNode 0 not parent!");
  assert(GroupNodeIndices[0] == 0 && "Reg 0 not in Group 0!");

  // Find the group for each register.
  unsigned Group1 = GetGroup(Reg1);
  unsigned Group2 = GetGroup(Reg2);

  // If either group is 0, then that must become the parent.
  unsigned Parent = (Group1 == 0) ? Group1 : Group2;
  unsigned Other = (Parent == Group1) ? Group2 : Group1;
  GroupNodes.at(Other) = Parent;
  return Parent;
}
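
// (Note: when neither group is 0, Group2 becomes the parent; when either
// group is 0 the merged group is 0, so membership in group 0 is sticky.
// Group 0 is how registers are marked ineligible for renaming.)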

unsigned SchedulePostRATDList::LeaveGroup(unsigned Reg)
{
  // Create a new GroupNode for Reg. Reg's existing GroupNode must
  // stay as is because there could be other GroupNodes referring to
  // it.
  unsigned idx = GroupNodes.size();
  GroupNodes.push_back(idx);
  GroupNodeIndices[Reg] = idx;
  return idx;
}

bool SchedulePostRATDList::IsLive(unsigned Reg)
{
  // KillIndex must be defined and DefIndex not defined for a register
  // to be live.
  return (KillIndices[Reg] != ~0u) && (DefIndices[Reg] == ~0u);
}
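
// (Any other combination of KillIndices/DefIndices values means the register
// is not live at the current point of the bottom-up scan.)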

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Initialize all registers to be in their own group. Initially we
  // assign the register to the same-indexed GroupNode.
  for (unsigned i = 0; i < TargetRegisterInfo::FirstVirtualRegister; ++i)
    GroupNodeIndices[i] = i;

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      UnionGroups(Reg, 0);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        UnionGroups(AliasReg, 0);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        UnionGroups(Reg, 0);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          UnionGroups(AliasReg, 0);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In non-return this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    UnionGroups(Reg, 0);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      UnionGroups(AliasReg, 0);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}
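
// (Every live-out register, its aliases, and the pristine callee-saved
// registers above are unioned into group 0, which excludes them from
// anti-dependency renaming.)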

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (EnableAntiDepBreaking != "none") {
    if (BreakAntiDependencies(EnableAntiDepBreaking != "all")) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  DEBUG(errs() << "Observe: ");
  DEBUG(MI->dump());

  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg) {
    // If Reg is currently live, then mark that it can't be renamed as
    // we don't know the extent of its live-range anymore (now that it
    // has been scheduled). If it is not live but was defined in the
    // previous schedule region, then set its def index to the most
    // conservative location (i.e. the beginning of the previous
    // schedule region).
    if (IsLive(Reg)) {
      DEBUG(if (GetGroup(Reg) != 0)
              errs() << " " << TRI->getName(Reg) << "=g" <<
                GetGroup(Reg) << "->g0(region live-out)");
      UnionGroups(Reg, 0);
    } else if ((DefIndices[Reg] < InsertPosIndex) && (DefIndices[Reg] >= Count)) {
      DefIndices[Reg] = Count;
    }
  }

  PrescanInstruction(MI, Count);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
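
// (Worked example with made-up numbers: if SU has predecessor edges P1 with
// depth 3 + latency 2 = 5 and P2 with depth 4 + latency 2 = 6, P2 is
// returned; had they tied, an anti-dependence edge would be preferred so
// that it can be considered for breaking.)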

/// AntiDepPathStep - Return an anti-dependence edge into SU, or null if
/// SU has no incoming anti-dependence.
static SDep *AntiDepPathStep(SUnit *SU) {
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    if (P->getKind() == SDep::Anti) {
      return &*P;
    }
  }
  return 0;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI, unsigned Count) {
  // Scan the register defs for this instruction and update
  // live-ranges, groups and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    // Ignore two-addr defs for liveness...
    if (MI->isRegTiedToUseOperand(i)) continue;

    // Update Def for Reg and subregs.
    DefIndices[Reg] = Count;
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
    }
  }

  DEBUG(errs() << "\tGroups:");
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isDef()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    DEBUG(errs() << " " << TRI->getName(Reg) << "=g" << GetGroup(Reg));

    // If MI's defs have special allocation requirement, don't allow
    // any def registers to be changed. Also assume all registers
    // defined in a call must not be changed (ABI).
    if (MI->getDesc().isCall() || MI->getDesc().hasExtraDefRegAllocReq()) {
      DEBUG(if (GetGroup(Reg) != 0) errs() << "->g0(alloc-req)");
      UnionGroups(Reg, 0);
    }

    // Any subregisters that are live at this point are defined here,
    // so group those subregisters with Reg.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      if (IsLive(SubregReg)) {
        UnionGroups(Reg, SubregReg);
        DEBUG(errs() << "->g" << GetGroup(Reg) << "(via " <<
              TRI->getName(SubregReg) << ")");
      }
    }

    // Note register reference...
    const TargetRegisterClass *RC = NULL;
    if (i < MI->getDesc().getNumOperands())
      RC = MI->getDesc().OpInfo[i].getRegClass(TRI);
    RegisterReference RR = { &MO, RC };
    RegRefs.insert(std::make_pair(Reg, RR));
  }

  DEBUG(errs() << '\n');
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Scan the register uses for this instruction and update
  // live-ranges, groups and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    // If the register wasn't previously live, then this use is its kill
    // (we scan bottom-up). Forget the previous live-range information
    // and start a new live-range for the register.
    if (!IsLive(Reg)) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      RegRefs.erase(Reg);
      LeaveGroup(Reg);
    }
    // Repeat, for subregisters.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      if (!IsLive(SubregReg)) {
        KillIndices[SubregReg] = Count;
        DefIndices[SubregReg] = ~0u;
        RegRefs.erase(SubregReg);
        LeaveGroup(SubregReg);
      }
    }

    // Note register reference...
    const TargetRegisterClass *RC = NULL;
    if (i < MI->getDesc().getNumOperands())
      RC = MI->getDesc().OpInfo[i].getRegClass(TRI);
    RegisterReference RR = { &MO, RC };
    RegRefs.insert(std::make_pair(Reg, RR));
  }

  // Form a group of all defs and uses of a KILL instruction to ensure
  // that all registers are renamed as a group.
  if (MI->getOpcode() == TargetInstrInfo::KILL) {
    unsigned FirstReg = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (FirstReg != 0)
        UnionGroups(FirstReg, Reg);
      FirstReg = Reg;
    }

    DEBUG(if (FirstReg != 0) errs() << "\tKill Group: g" <<
            GetGroup(FirstReg) << '\n');
  }
}

unsigned SchedulePostRATDList::FindSuitableFreeRegister(unsigned AntiDepReg,
                                                        unsigned LastNewReg) {
  // Collect all registers in the same group as AntiDepReg. These all
  // need to be renamed together if we are to break the
  // anti-dependence.
  std::vector<unsigned> Regs;
  GetGroupRegs(GetGroup(AntiDepReg), Regs);

  DEBUG(errs() << "\tRename Register Group:");
  DEBUG(for (unsigned i = 0, e = Regs.size(); i != e; ++i)
          DEBUG(errs() << " " << TRI->getName(Regs[i])));
  DEBUG(errs() << "\n");

  // If there is a single register that needs to be renamed then we
  // can do it ourselves.
  if (Regs.size() == 1) {
    assert(Regs[0] == AntiDepReg && "Register group does not contain register!");

    // Check all references that need rewriting. Gather up all the
    // register classes for the register references.
    const TargetRegisterClass *FirstRC = NULL;
    std::set<const TargetRegisterClass *> RCs;
    std::pair<std::multimap<unsigned, RegisterReference>::iterator,
              std::multimap<unsigned, RegisterReference>::iterator>
      Range = RegRefs.equal_range(AntiDepReg);
    for (std::multimap<unsigned, RegisterReference>::iterator
           Q = Range.first, QE = Range.second; Q != QE; ++Q) {
      const TargetRegisterClass *RC = Q->second.RC;
      if (RC == NULL) continue;
      if (FirstRC == NULL)
        FirstRC = RC;
      else if (FirstRC != RC)
        RCs.insert(RC);
    }

    if (FirstRC == NULL)
      return 0;

    DEBUG(errs() << "\tChecking Regclasses: " << FirstRC->getName());
    DEBUG(for (std::set<const TargetRegisterClass *>::iterator S =
                 RCs.begin(), E = RCs.end(); S != E; ++S)
            errs() << " " << (*S)->getName());
    DEBUG(errs() << '\n');

    // Using the allocation order for one of the register classes,
    // find the first register that belongs to all the register
    // classes and is available over the live range of the register.
    DEBUG(errs() << "\tFind Register:");
    for (TargetRegisterClass::iterator R = FirstRC->allocation_order_begin(MF),
           RE = FirstRC->allocation_order_end(MF); R != RE; ++R) {
      unsigned NewReg = *R;

      // Don't replace a register with itself.
      if (NewReg == AntiDepReg) continue;

      DEBUG(errs() << " " << TRI->getName(NewReg));

      // Make sure NewReg is in all required register classes.
      for (std::set<const TargetRegisterClass *>::iterator S =
             RCs.begin(), E = RCs.end(); S != E; ++S) {
        const TargetRegisterClass *RC = *S;
        if (!RC->contains(NewReg)) {
          DEBUG(errs() << "(not in " << RC->getName() << ")");
          NewReg = 0;
          break;
        }
      }

      // If NewReg is dead and NewReg's most recent def is not before
      // AntiDepReg's kill, it's safe to replace AntiDepReg with
      // NewReg. We must also check all subregisters of NewReg.
      if (IsLive(NewReg) || (KillIndices[AntiDepReg] > DefIndices[NewReg])) {
        DEBUG(errs() << "(live)");
        continue;
      }
      {
        bool found = false;
        for (const unsigned *Subreg = TRI->getSubRegisters(NewReg);
             *Subreg; ++Subreg) {
          unsigned SubregReg = *Subreg;
          if (IsLive(SubregReg) ||
              (KillIndices[AntiDepReg] > DefIndices[SubregReg])) {
            DEBUG(errs() << "(subreg " << TRI->getName(SubregReg) << " live)");
            found = true;
          }
        }
        if (found)
          continue;
      }

      if (NewReg != 0) {
        DEBUG(errs() << '\n');
        return NewReg;
      }
    }

    DEBUG(errs() << '\n');
  }

  // No registers are free and available!
  return 0;
}
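
// (Note: the LastNewReg argument is a hint from the caller; in the version
// of the search above it is not actually consulted when choosing NewReg.)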

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies(bool CriticalPathOnly) {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // If breaking anti-dependencies only along the critical path, track
  // progress along the critical path through the SUnit graph as we
  // walk the instructions.
  SUnit *CriticalPathSU = 0;
  MachineInstr *CriticalPathMI = 0;

  // If breaking all anti-dependencies, we need a map from MI to SUnit.
  std::map<MachineInstr *, SUnit *> MISUnitMap;

  // Find the node at the bottom of the critical path.
  if (CriticalPathOnly) {
    SUnit *Max = 0;
    for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
      SUnit *SU = &SUnits[i];
      if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
        Max = SU;
    }

    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    CriticalPathSU = Max;
    CriticalPathMI = CriticalPathSU->getInstr();
  } else {
    for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
      SUnit *SU = &SUnits[i];
      MISUnitMap.insert(std::pair<MachineInstr *, SUnit *>(SU->getInstr(), SU));
    }
    DEBUG(errs() << "Breaking all anti-dependencies\n");
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (!IsLive(Reg))
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
  std::string dbgStr;
#endif

  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges. Walk the instructions
  // from the bottom up, tracking information about liveness as we go
  // to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    DEBUG(errs() << "Anti: ");
    DEBUG(MI->dump());

    // Process the defs in MI...
    PrescanInstruction(MI, Count);

    // Check if this instruction has an anti-dependence that we may be
    // able to break. If it does, set AntiDepReg to the non-zero
    // register associated with the anti-dependence.
    //
    unsigned AntiDepReg = 0;

    // Limiting our attention to the critical path is a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // We can also break all anti-dependencies, because they can
    // occur along the non-critical path but are still detrimental for
    // scheduling.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    if (!CriticalPathOnly || (MI == CriticalPathMI)) {
      DEBUG(dbgStr.clear());

      SUnit *PathSU;
      SDep *Edge;
      if (CriticalPathOnly) {
        PathSU = CriticalPathSU;
        Edge = CriticalPathStep(PathSU);
      } else {
        PathSU = MISUnitMap[MI];
        Edge = (PathSU) ? AntiDepPathStep(PathSU) : 0;
      }

      if (Edge) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges, and ignore KILL
        // instructions (they form a group in ScanInstruction but
        // don't cause any anti-dependence breaking themselves).
        if ((Edge->getKind() == SDep::Anti) &&
            (MI->getOpcode() != TargetInstrInfo::KILL)) {
          AntiDepReg = Edge->getReg();
          DEBUG(dbgStr += "\tAntidep reg: ");
          DEBUG(dbgStr += TRI->getName(AntiDepReg));
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg)) {
            // Don't break anti-dependencies on non-allocatable registers.
            DEBUG(dbgStr += " (non-allocatable)");
            AntiDepReg = 0;
          } else {
            int OpIdx = MI->findRegisterDefOperandIdx(AntiDepReg);
            assert(OpIdx != -1 && "Can't find index for defined register operand");
            if (MI->isRegTiedToUseOperand(OpIdx)) {
              // If the anti-dep register is tied to a use, then don't try to
              // change it. It will be changed along with the use if required
              // to break an earlier antidep.
              DEBUG(dbgStr += " (tied-to-use)");
              AntiDepReg = 0;
            } else {
              // If the SUnit has other dependencies on the SUnit that
              // it anti-depends on, don't bother breaking the
              // anti-dependency since those edges would prevent such
              // units from being scheduled past each other
              // regardless.
              //
              // Also, if there are dependencies on other SUnits with
              // the same register as the anti-dependency, don't
              // attempt to break it.
              for (SUnit::pred_iterator P = PathSU->Preds.begin(),
                     PE = PathSU->Preds.end(); P != PE; ++P) {
                if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                  DEBUG(dbgStr += " (real dependency)");
                  AntiDepReg = 0;
                  break;
                }
              }
            }
          }
        }

        if (CriticalPathOnly) {
          CriticalPathSU = NextSU;
          CriticalPathMI = CriticalPathSU->getInstr();
        }
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    // Determine AntiDepReg's register group.
    const unsigned GroupIndex = AntiDepReg != 0 ? GetGroup(AntiDepReg) : 0;
    if (GroupIndex == 0) {
      DEBUG(if (AntiDepReg != 0) dbgStr += " (zero group)");
      AntiDepReg = 0;
    }

    DEBUG(if (!dbgStr.empty()) errs() << dbgStr << '\n');

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = FindSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg])) {
        DEBUG(errs() << "\tBreaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, RegisterReference>::iterator,
                  std::multimap<unsigned, RegisterReference>::iterator>
          Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, RegisterReference>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second.Operand->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        // FIXME: do this for every register in NewReg's group.
        UnionGroups(NewReg, 0);
        RegRefs.erase(NewReg);
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];

        // FIXME: do this for every register in AntiDepReg's group.
        UnionGroups(AntiDepReg, 0);
        RegRefs.erase(AntiDepReg);
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
        ++NumFixedAnti;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}
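
// An illustrative example with made-up registers: in the sequence
//
//   R1 = A ...         (def)
//   ...  = R1 ...      (use)
//   R1 = B ...         (redefinition; anti-dependence from the use to here)
//
// renaming the second live range of R1 to a free register R2 removes the
// WAR hazard, so the definition of B can be scheduled above the use.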

/// StartBlockForKills - Initialize register live-range state for updating kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and clear MO's kill flag; MO stays marked as killed only
  // if all of its subregs are dead.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags, which may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark the register and all of its subregs,
    // as they are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}
1192
//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

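    // Scan the available queue in priority order for the first node the
    // hazard recognizer will allow to issue this cycle, remembering
    // whether any rejected candidate reported a noop hazard.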
    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using a target-specific hazard recognizer, don't advance
      // the cycle time just because we schedule a node. If the target
      // allows it, we can schedule multiple nodes in the same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem, so
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have
        // instructions that will fault if we don't do this right. This is
        // the case for processors without pipeline interlocks, among
        // others.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

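// A typical client constructs this pass when building the codegen pass
// pipeline; as a sketch (the exact call site is an assumption, not shown
// here):
//
//   PM.add(createPostRAScheduler(CodeGenOpt::Default));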
FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}