//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
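// For example, passing -postra-sched-debugdiv=4 -postra-sched-debugmod=1
// schedules only those blocks whose running block count modulo 4 equals 1,
// which is a convenient way to bisect scheduling-related problems (see the
// #ifndef NDEBUG block in runOnMachineFunction below).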

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// AntiDepMode - Anti-dependence breaking mode.
    TargetSubtarget::AntiDepBreakMode AntiDepMode;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// KeepRegs - A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    SmallSet<unsigned, 4> KeepRegs;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa,
                         TargetSubtarget::AntiDepBreakMode adm)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR), AA(aa), AntiDepMode(adm) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking) ?
      TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA, AntiDepMode);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
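    // Walk the block bottom-up: each time a scheduling boundary is reached,
    // the region between it and the previous boundary (or the block end) is
    // scheduled and emitted, and the boundary instruction itself is only
    // folded into the liveness state via Observe(), not scheduled.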
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Clear "do not change" set.
  KeepRegs.clear();

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In non-return this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepMode != TargetSubtarget::ANTIDEP_NONE) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // It's not safe to change register allocation for source operands that
    // have special allocation requirements.
    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
      if (KeepRegs.insert(Reg)) {
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KeepRegs.insert(*Subreg);
      }
    }
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    KeepRegs.erase(Reg);
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      KeepRegs.erase(SubregReg);
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is, this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u))
           && "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (KillIndices[Reg] == ~0u)
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
#endif

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.count(AntiDepReg))
            // Don't break anti-dependencies if a use down below requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirement, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}