//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h" // for the cl::opt below
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/BitVector.h" // for AllocatableSet
#include "llvm/ADT/DenseMap.h"  // for CriticalAntiDeps
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include <map>
#include <climits>
using namespace llvm;

STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break scheduling anti-dependencies"),
                      cl::init(false));
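// Usage note: anti-dependence breaking is off by default; it is enabled by
// passing -break-anti-dependencies on the command line (the flag name comes
// from the declaration above; invoking it through llc is an assumption about
// the driver).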

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

  public:
    SchedulePostRATDList(MachineBasicBlock *mbb, const TargetMachine &tm)
      : ScheduleDAGInstrs(mbb, tm), Topo(SUnits) {}

    void Schedule();

  private:
    void ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {

    SchedulePostRATDList Scheduler(MBB, Fn.getTarget());

    Scheduler.Run();

    Scheduler.EmitSchedule();
  }

  return true;
}

/// Schedule - Schedule the DAG using list scheduling.
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build scheduling units.
  BuildSchedUnits();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      BuildSchedUnits();
    }
  }

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// getInstrOperandRegClass - Return register class of the operand of an
/// instruction of the specified TargetInstrDesc.
static const TargetRegisterClass*
getInstrOperandRegClass(const TargetRegisterInfo *TRI,
                        const TargetInstrInfo *TII, const TargetInstrDesc &II,
                        unsigned Op) {
  if (Op >= II.getNumOperands())
    return NULL;
  if (II.OpInfo[Op].isLookupPtrRegClass())
    return TII->getPointerRegClass();
  return TRI->getRegClass(II.OpInfo[Op].RegClass);
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (BB->empty()) return false;

  Topo.InitDAGTopologicalSorting();

  // Compute a critical path for the DAG.
  SUnit *Max = 0;
  std::vector<SDep *> CriticalPath(SUnits.size());
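  // CriticalPath[i] records the incoming edge on the critical path to
  // SUnits[i], or null if SUnits[i] has no critical-path predecessor; the
  // walk further below follows these edges backwards from Max.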
  for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(),
       E = Topo.end(); I != E; ++I) {
    SUnit *SU = &SUnits[*I];
    for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
         P != PE; ++P) {
      SUnit *PredSU = P->Dep;
      // This assumes that there's no delay for reusing registers.
      unsigned PredLatency = (P->isCtrl && P->Reg != 0) ? 1 : PredSU->Latency;
      unsigned PredTotalLatency = PredSU->CycleBound + PredLatency;
      if (SU->CycleBound < PredTotalLatency ||
          (SU->CycleBound == PredTotalLatency && !P->isAntiDep)) {
        SU->CycleBound = PredTotalLatency;
        CriticalPath[*I] = &*P;
      }
    }
    // Keep track of the node at the end of the critical path.
    if (!Max || SU->CycleBound + SU->Latency > Max->CycleBound + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max ? Max->CycleBound + Max->Latency : 0) << "\n";

  // Walk the critical path from the bottom up. Collect all anti-dependence
  // edges on the critical path. Skip anti-dependencies between SUnits that
  // are connected with other edges, since such units won't be able to be
  // scheduled past each other anyway.
  //
  // The heuristic is that edges on the critical path are more important to
  // break than other edges. And since there are a limited number of
  // registers, we don't want to waste them breaking edges that aren't
  // important.
  //
  // TODO: Instructions with multiple defs could have multiple
  // anti-dependencies. The current code here only knows how to break one
  // edge per instruction. Note that we'd have to be able to break all of
  // the anti-dependencies in an instruction in order to be effective.
  BitVector AllocatableSet = TRI->getAllocatableSet(*MF);
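  // Map each critical-path instruction to the register whose anti-dependence
  // we will attempt to break when the bottom-up walk below reaches it.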
  DenseMap<MachineInstr *, unsigned> CriticalAntiDeps;
  for (SUnit *SU = Max; CriticalPath[SU->NodeNum];
       SU = CriticalPath[SU->NodeNum]->Dep) {
    SDep *Edge = CriticalPath[SU->NodeNum];
    SUnit *NextSU = Edge->Dep;
    unsigned AntiDepReg = Edge->Reg;
    // Only consider anti-dependence edges.
    if (!Edge->isAntiDep)
      continue;
    assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
    // Don't break anti-dependencies on non-allocatable registers.
    if (!AllocatableSet.test(AntiDepReg))
      continue;
    // If the SUnit has other dependencies on the SUnit that it
    // anti-depends on, don't bother breaking the anti-dependency.
    // Also, if there are dependencies on other SUnits with the
    // same register as the anti-dependency, don't attempt to
    // break it.
    for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
         P != PE; ++P)
      if (P->Dep == NextSU ?
            (!P->isAntiDep || P->Reg != AntiDepReg) :
            (!P->isCtrl && !P->isAntiDep && P->Reg == AntiDepReg)) {
        AntiDepReg = 0;
        break;
      }
    if (AntiDepReg != 0)
      CriticalAntiDeps[SU->getInstr()] = AntiDepReg;
  }

  // For live regs that are only used in one register class in a live range,
  // this records the register class. If the register is not live, the
  // corresponding value is null. If the register is live but used in multiple
  // register classes, the corresponding value is -1 casted to a pointer.
  const TargetRegisterClass *
  Classes[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Map registers to all their references within a live range.
  std::multimap<unsigned, MachineOperand *> RegRefs;

  // The index of the most recent kill (proceeding bottom-up), or -1 if
  // the register is not live.
  unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
  std::fill(KillIndices, array_endof(KillIndices), -1);
  // The index of the most recent def (proceeding bottom-up), or -1 if
  // the register is live.
  unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
  std::fill(DefIndices, array_endof(DefIndices), BB->size());
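  // Invariant: for any register, exactly one of KillIndices[Reg] and
  // DefIndices[Reg] is -1u at a time (live registers have a valid kill index,
  // dead registers a valid def index); the asserts in the renaming loop below
  // check this.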

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = -1;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = -1;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = -1;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = -1;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = -1;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = -1;
    }
  }

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = BB->size() - 1;
  for (MachineBasicBlock::reverse_iterator I = BB->rbegin(), E = BB->rend();
       I != E; ++I, --Count) {
    MachineInstr *MI = &*I;

    // Check if this instruction has an anti-dependence that we're
    // interested in.
    DenseMap<MachineInstr *, unsigned>::iterator C = CriticalAntiDeps.find(MI);
    unsigned AntiDepReg = C != CriticalAntiDeps.end() ?
      C->second : 0;
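    // A zero AntiDepReg means there is no anti-dependence to try to break at
    // this instruction.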

    // Scan the register operands for this instruction and update
    // Classes and RegRefs.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      const TargetRegisterClass *NewRC =
        getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);

      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      if (MO.isUse() && AntiDepReg == Reg)
        AntiDepReg = 0;

      // For now, only allow the register to be changed if its register
      // class is consistent across all uses.
      if (!Classes[Reg] && NewRC)
        Classes[Reg] = NewRC;
      else if (!NewRC || Classes[Reg] != NewRC)
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

      // Now check for aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        // If an alias of the reg is used during the live range, give up.
        // Note that this allows us to skip checking if AntiDepReg
        // overlaps with any of the aliases, among other things.
        unsigned AliasReg = *Alias;
        if (Classes[AliasReg]) {
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        }
      }

      // If we're still willing to consider this register, note the reference.
      if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
        RegRefs.insert(std::make_pair(Reg, &MO));
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(*MF),
           RE = RC->allocation_order_end(*MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
        assert(((KillIndices[AntiDepReg] == -1u) !=
                (DefIndices[AntiDepReg] == -1u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == -1u) !=
                (DefIndices[NewReg] == -1u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == -1u &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on reg " << AntiDepReg
               << " with " << RegRefs.count(AntiDepReg) << " references"
               << " with new reg " << NewReg << "!\n";

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
            Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = -1;

          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    // Update liveness.
    // Proceeding upwards, registers that are def'd but not used in this
    // instruction are now dead.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegReDefinedByTwoAddr(Reg, i)) continue;

      DefIndices[Reg] = Count;
      KillIndices[Reg] = -1;
      Classes[Reg] = 0;
      RegRefs.erase(Reg);
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        unsigned SubregReg = *Subreg;
        DefIndices[SubregReg] = Count;
        KillIndices[SubregReg] = -1;
        Classes[SubregReg] = 0;
        RegRefs.erase(SubregReg);
      }
    }
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isUse()) continue;

      const TargetRegisterClass *NewRC =
        getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);

      // For now, only allow the register to be changed if its register
      // class is consistent across all uses.
      if (!Classes[Reg] && NewRC)
        Classes[Reg] = NewRC;
      else if (!NewRC || Classes[Reg] != NewRC)
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

      RegRefs.insert(std::make_pair(Reg, &MO));

      // The register wasn't previously live (proceeding bottom-up), so this
      // use is its kill.
      if (KillIndices[Reg] == -1u) {
        KillIndices[Reg] = Count;
        DefIndices[Reg] = -1u;
      }
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        if (KillIndices[AliasReg] == -1u) {
          KillIndices[AliasReg] = Count;
          DefIndices[AliasReg] = -1u;
        }
      }
    }
  }
  assert(Count == -1u && "Count mismatch!");

  return Changed;
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain) {
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  // If this is a token edge, we don't need to wait for the latency of the
  // preceding instruction (e.g. a long-latency load) unless there is also
  // some other data dependence.
  unsigned PredDoneCycle = SU->Cycle;
  if (!isChain)
    PredDoneCycle += SU->Latency;
  else if (SU->Latency)
    PredDoneCycle += 1;
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, PredDoneCycle);

  if (SuccSU->NumPredsLeft == 0) {
    PendingQueue.push_back(SuccSU);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  SU->Cycle = CurCycle;

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, I->Dep, I->isCtrl);

  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->CycleBound == CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else {
        assert(PendingQueue[i]->CycleBound > CurCycle && "Negative latency?");
      }
    }

    // If there are no instructions available, don't try to issue anything.
    if (AvailableQueue.empty()) {
      ++CurCycle;
      continue;
    }

    SUnit *FoundSUnit = AvailableQueue.pop();

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else {
      // Otherwise, we have a pipeline stall, but no other problem; just
      // advance the current cycle and try again.
      DOUT << "*** Advancing cycle, no work to do\n";
      ++NumStalls;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

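// A minimal usage sketch (an assumption about the call site, not code from
// this file): the target machine's codegen pass setup would add this pass
// after register allocation, e.g. PM.add(createPostRAScheduler()).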
FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}