//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
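// This pass also breaks anti-dependencies (write-after-read hazards) along
// the critical path by renaming registers (see BreakAntiDependencies below),
// and it rewrites register kill flags invalidated by the reordering (see
// FixupKills).
//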
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h" // for the cl::opt flags below
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h" // for the KeepRegs member
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);
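// For example (a sketch; these are hidden codegen flags):
//   llc -post-RA-scheduler -break-anti-dependencies=false foo.bc
// forces post-RA scheduling on while leaving anti-dependence edges intact.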

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
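// For example, -postra-sched-debugdiv=2 -postra-sched-debugmod=1 restricts
// scheduling to every other block visited, which helps bisect a scheduling
// problem down to a single block (debug builds only; see the #ifndef NDEBUG
// block in runOnMachineFunction).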

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 cast to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
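
    // Invariant (enforced by asserts throughout this file): at any point in
    // the bottom-up walk, exactly one of KillIndices[Reg] and DefIndices[Reg]
    // is ~0u for each register, i.e. a register is tracked either as live
    // (kill index known) or as dead (def index known), never both.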

    /// KeepRegs - A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    SmallSet<unsigned, 4> KeepRegs;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR), AA(aa) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel))
      return false;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
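    // The walk below is bottom-up: Count is the block-relative index just
    // past the instruction being visited, and CurrentCount is the index of
    // the end of the current region (the iterator Current), which Run()
    // takes as the count of instructions preceding that end position.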
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Clear "do not change" set.
  KeepRegs.clear();

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In a non-return block this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
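// For example, a predecessor at depth 3 over a latency-2 edge (total 5)
// beats one at depth 4 over a latency-0 edge (total 4); on a tie, the
// anti-dependence edge wins, since those are the edges this pass can try
// to break.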

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // It's not safe to change register allocation for source operands of
    // instructions that have special allocation requirements.
    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
      if (KeepRegs.insert(Reg)) {
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KeepRegs.insert(*Subreg);
      }
    }
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    KeepRegs.erase(Reg);
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      KeepRegs.erase(SubregReg);
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) !=
            (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) !=
            (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (KillIndices[Reg] == ~0u)
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
#endif
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000661 // Track progress along the critical path through the SUnit graph as we walk
662 // the instructions.
663 SUnit *CriticalPathSU = Max;
664 MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();
665
666 // Consider this pattern:
667 // A = ...
668 // ... = A
669 // A = ...
670 // ... = A
671 // A = ...
672 // ... = A
673 // A = ...
674 // ... = A
675 // There are three anti-dependencies here, and without special care,
676 // we'd break all of them using the same register:
677 // A = ...
678 // ... = A
679 // B = ...
680 // ... = B
681 // B = ...
682 // ... = B
683 // B = ...
684 // ... = B
685 // because at each anti-dependence, B is the first register that
686 // isn't A which is free. This re-introduces anti-dependencies
687 // at all but one of the original anti-dependencies that we were
688 // trying to break. To avoid this, keep track of the most recent
689 // register that each register was replaced with, avoid
690 // using it to repair an anti-dependence on the same register.
691 // This lets us produce this:
692 // A = ...
693 // ... = A
694 // B = ...
695 // ... = B
696 // C = ...
697 // ... = C
698 // B = ...
699 // ... = B
700 // This still has an anti-dependence on B, but at least it isn't on the
701 // original critical path.
702 //
703 // TODO: If we tracked more than one register here, we could potentially
704 // fix that remaining critical edge too. This is a little more involved,
705 // because unlike the most recent register, less recent registers should
706 // still be considered, though only if no other registers are available.
707 unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
708
709 // Attempt to break anti-dependence edges on the critical path. Walk the
710 // instructions from the bottom up, tracking information about liveness
711 // as we go to help determine which registers are available.
Dan Gohman21d90032008-11-25 00:52:40 +0000712 bool Changed = false;
Dan Gohman47ac0f02009-02-11 04:27:20 +0000713 unsigned Count = InsertPosIndex - 1;
714 for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
Dan Gohman43f07fb2009-02-03 18:57:45 +0000715 I != E; --Count) {
716 MachineInstr *MI = --I;
Dan Gohman21d90032008-11-25 00:52:40 +0000717
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000718 // Check if this instruction has a dependence on the critical path that
719 // is an anti-dependence that we may be able to break. If it is, set
720 // AntiDepReg to the non-zero register associated with the anti-dependence.
Dan Gohman00dc84a2008-12-16 19:27:52 +0000721 //
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000722 // We limit our attention to the critical path as a heuristic to avoid
Dan Gohman00dc84a2008-12-16 19:27:52 +0000723 // breaking anti-dependence edges that aren't going to significantly
724 // impact the overall schedule. There are a limited number of registers
725 // and we want to save them for the important edges.
726 //
727 // TODO: Instructions with multiple defs could have multiple
728 // anti-dependencies. The current code here only knows how to break one
729 // edge per instruction. Note that we'd have to be able to break all of
730 // the anti-dependencies in an instruction in order to be effective.
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000731 unsigned AntiDepReg = 0;
732 if (MI == CriticalPathMI) {
733 if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
Dan Gohman00dc84a2008-12-16 19:27:52 +0000734 SUnit *NextSU = Edge->getSUnit();
735
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000736 // Only consider anti-dependence edges.
737 if (Edge->getKind() == SDep::Anti) {
Dan Gohman00dc84a2008-12-16 19:27:52 +0000738 AntiDepReg = Edge->getReg();
739 assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000740 if (!AllocatableSet.test(AntiDepReg))
Evan Cheng714e8bc2009-10-01 08:26:23 +0000741 // Don't break anti-dependencies on non-allocatable registers.
742 AntiDepReg = 0;
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000743 else if (KeepRegs.count(AntiDepReg))
744 // Don't break anti-dependencies if an use down below requires
745 // this exact register.
746 AntiDepReg = 0;
747 else {
748 // If the SUnit has other dependencies on the SUnit that it
749 // anti-depends on, don't bother breaking the anti-dependency
750 // since those edges would prevent such units from being
751 // scheduled past each other regardless.
752 //
753 // Also, if there are dependencies on other SUnits with the
754 // same register as the anti-dependency, don't attempt to
755 // break it.
756 for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
757 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
758 if (P->getSUnit() == NextSU ?
Dan Gohman00dc84a2008-12-16 19:27:52 +0000759 (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
760 (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000761 AntiDepReg = 0;
762 break;
Dan Gohman00dc84a2008-12-16 19:27:52 +0000763 }
764 }
765 }
Dan Gohmanc1ae8c92009-10-21 01:44:44 +0000766 CriticalPathSU = NextSU;
767 CriticalPathMI = CriticalPathSU->getInstr();
Dan Gohman00dc84a2008-12-16 19:27:52 +0000768 } else {
769 // We've reached the end of the critical path.
770 CriticalPathSU = 0;
771 CriticalPathMI = 0;
772 }
773 }
Dan Gohman21d90032008-11-25 00:52:40 +0000774

    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirements, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
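
  // Note: as currently written, no path in this function deletes the
  // operand, so the result is always false; the boolean return convention
  // anticipates adjustments that might remove MO.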
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it
        // became live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}
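
// A sketch of typical use (outside this file): the target machine adds the
// pass when building its codegen pipeline, e.g.
//   PM.add(createPostRAScheduler(OptLevel));
// with OptLevel forwarded so targets can decide via enablePostRAScheduler()
// whether post-RA scheduling is worthwhile at that optimization level.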