//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"  // cl::opt, used below
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"  // array_endof, used below
#include <map>
#include <climits>
using namespace llvm;

STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break scheduling anti-dependencies"),
                      cl::init(false));

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

  public:
    SchedulePostRATDList(MachineBasicBlock *mbb, const TargetMachine &tm)
      : ScheduleDAGInstrs(mbb, tm), Topo(SUnits) {}

    void Schedule();

  private:
    void ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {

    SchedulePostRATDList Scheduler(MBB, Fn.getTarget());

    Scheduler.Run();

    Scheduler.EmitSchedule();
  }

  return true;
}

/// Schedule - Schedule the DAG using list scheduling.
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build scheduling units.
  BuildSchedUnits();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      BuildSchedUnits();
    }
  }

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// getInstrOperandRegClass - Return register class of the operand of an
/// instruction of the specified TargetInstrDesc.
static const TargetRegisterClass*
getInstrOperandRegClass(const TargetRegisterInfo *TRI,
                        const TargetInstrInfo *TII, const TargetInstrDesc &II,
                        unsigned Op) {
  if (Op >= II.getNumOperands())
    return NULL;
  if (II.OpInfo[Op].isLookupPtrRegClass())
    return TII->getPointerRegClass();
  return TRI->getRegClass(II.OpInfo[Op].RegClass);
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
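/// For example, in:
///   r1 = op ...
///   ... = r1       // last use of r1
///   r1 = op ...    // anti-dependence: this def can't move above the use
/// renaming the second def of r1 to a free register removes the
/// anti-dependence edge and gives the scheduler more freedom.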
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (BB->empty()) return false;

  Topo.InitDAGTopologicalSorting();

  // Compute a critical path for the DAG.
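  // Visiting the SUnits in topological order guarantees that each node's
  // predecessors have already been processed, so CycleBound accumulates the
  // longest-latency path from the top of the block, and CriticalPath[i]
  // records the predecessor edge on that path.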
  SUnit *Max = 0;
  std::vector<SDep *> CriticalPath(SUnits.size());
  for (ScheduleDAGTopologicalSort::const_iterator I = Topo.begin(),
       E = Topo.end(); I != E; ++I) {
    SUnit *SU = &SUnits[*I];
    for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
         P != PE; ++P) {
      SUnit *PredSU = P->Dep;
      unsigned PredLatency = PredSU->CycleBound + PredSU->Latency;
      if (SU->CycleBound < PredLatency) {
        SU->CycleBound = PredLatency;
        CriticalPath[*I] = &*P;
      }
    }
    // Keep track of the node at the end of the critical path.
    if (!Max || SU->CycleBound + SU->Latency > Max->CycleBound + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max ? Max->CycleBound + Max->Latency : 0) << "\n";

  // Walk the critical path from the bottom up. Collect all anti-dependence
  // edges on the critical path. Skip anti-dependencies between SUnits that
  // are connected with other edges, since such units cannot be scheduled
  // past each other anyway.
  //
  // The heuristic is that edges on the critical path are more important to
  // break than other edges. And since there are a limited number of
  // registers, we don't want to waste them breaking edges that aren't
  // important.
  //
  // TODO: Instructions with multiple defs could have multiple
  // anti-dependencies. The current code here only knows how to break one
  // edge per instruction. Note that we'd have to be able to break all of
  // the anti-dependencies in an instruction in order to be effective.
191 BitVector AllocatableSet = TRI->getAllocatableSet(*MF);
192 DenseMap<MachineInstr *, unsigned> CriticalAntiDeps;
193 for (SUnit *SU = Max; CriticalPath[SU->NodeNum];
194 SU = CriticalPath[SU->NodeNum]->Dep) {
195 SDep *Edge = CriticalPath[SU->NodeNum];
196 SUnit *NextSU = Edge->Dep;
197 unsigned AntiDepReg = Edge->Reg;
198 // Don't break anti-dependencies on non-allocatable registers.
199 if (!AllocatableSet.test(AntiDepReg))
200 continue;
201 // If the SUnit has other dependencies on the SUnit that it
202 // anti-depends on, don't bother breaking the anti-dependency.
203 // Also, if there are dependencies on other SUnits with the
204 // same register as the anti-dependency, don't attempt to
205 // break it.
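    // In the check below, an edge to NextSU disqualifies the candidate
    // unless it is the anti-dependence on AntiDepReg itself; an edge to any
    // other SUnit disqualifies it only if it is a true data dependence on
    // AntiDepReg.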
    for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
         P != PE; ++P)
      if (P->Dep == NextSU ?
            (!P->isAntiDep || P->Reg != AntiDepReg) :
            (!P->isCtrl && !P->isAntiDep && P->Reg == AntiDepReg)) {
        AntiDepReg = 0;
        break;
      }
    if (AntiDepReg != 0)
      CriticalAntiDeps[SU->getInstr()] = AntiDepReg;
  }

  // For live regs that are only used in one register class in a live range,
  // the register class. If the register is not live, the corresponding value
  // is null. If the register is live but used in multiple register classes,
  // the corresponding value is -1 casted to a pointer.
  const TargetRegisterClass *
    Classes[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Map registers to all their references within a live range.
  std::multimap<unsigned, MachineOperand *> RegRefs;

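  // Instructions are numbered top-down from 0 to BB->size()-1, and the scan
  // below proceeds bottom-up, so "most recent" here means nearest below the
  // current instruction.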
  // The index of the most recent kill (proceeding bottom-up), or -1 if
  // the register is not live.
  unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];
  std::fill(KillIndices, array_endof(KillIndices), -1);
  // The index of the most recent def (proceeding bottom-up), or -1 if
  // the register is live.
  unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = -1;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = -1;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = -1;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = -1;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = -1;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = -1;
    }
  }

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Registers defined and not used in an instruction. This is used for
  // liveness tracking and is declared outside the loop only to avoid
  // having it be re-allocated on each iteration.
  DenseSet<unsigned> Defs;

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = BB->size() - 1;
  for (MachineBasicBlock::reverse_iterator I = BB->rbegin(), E = BB->rend();
       I != E; ++I, --Count) {
    MachineInstr *MI = &*I;

    // Check if this instruction has an anti-dependence that we're
    // interested in.
    DenseMap<MachineInstr *, unsigned>::iterator C = CriticalAntiDeps.find(MI);
    unsigned AntiDepReg = C != CriticalAntiDeps.end() ?
      C->second : 0;

    // Scan the register operands for this instruction and update
    // Classes and RegRefs.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      const TargetRegisterClass *NewRC =
        getInstrOperandRegClass(TRI, TII, MI->getDesc(), i);

      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      if (MO.isUse() && AntiDepReg == Reg)
        AntiDepReg = 0;

      // For now, only allow the register to be changed if its register
      // class is consistent across all uses.
      if (!Classes[Reg] && NewRC)
        Classes[Reg] = NewRC;
      else if (!NewRC || Classes[Reg] != NewRC)
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

      // Now check for aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        // If an alias of the reg is used during the live range, give up.
        // Note that this allows us to skip checking if AntiDepReg
        // overlaps with any of the aliases, among other things.
        unsigned AliasReg = *Alias;
        if (Classes[AliasReg]) {
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        }
      }

      // If we're still willing to consider this register, note the reference.
      if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
        RegRefs.insert(std::make_pair(Reg, &MO));
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(*MF),
           RE = RC->allocation_order_end(*MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
        assert(((KillIndices[AntiDepReg] == -1u) !=
                (DefIndices[AntiDepReg] == -1u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == -1u) != (DefIndices[NewReg] == -1u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == -1u &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on reg " << AntiDepReg
               << " with reg " << NewReg << "!\n";

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
            Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = -1;

          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    // Update liveness.
    Defs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isDef())
        Defs.insert(Reg);
      else {
        // Treat a use in the same instruction as a def as an extension of
        // a live range.
        Defs.erase(Reg);
        // If it wasn't previously live, this use is a kill.
        if (KillIndices[Reg] == -1u) {
          KillIndices[Reg] = Count;
          DefIndices[Reg] = -1u;
        }
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Defs.erase(AliasReg);
          if (KillIndices[AliasReg] == -1u) {
            KillIndices[AliasReg] = Count;
            DefIndices[AliasReg] = -1u;
          }
        }
      }
    }
    // Proceeding upwards, registers that are defined but not used in this
    // instruction are now dead.
    for (DenseSet<unsigned>::iterator D = Defs.begin(), DE = Defs.end();
         D != DE; ++D) {
      unsigned Reg = *D;
      DefIndices[Reg] = Count;
      KillIndices[Reg] = -1;
      Classes[Reg] = 0;
      RegRefs.erase(Reg);
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        unsigned SubregReg = *Subreg;
        DefIndices[SubregReg] = Count;
        KillIndices[SubregReg] = -1;
        Classes[SubregReg] = 0;
        RegRefs.erase(SubregReg);
      }
    }
  }
  assert(Count == -1u && "Count mismatch!");

  return Changed;
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain) {
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  // If this is a token edge, we don't need to wait for the latency of the
  // preceding instruction (e.g. a long-latency load) unless there is also
  // some other data dependence.
  unsigned PredDoneCycle = SU->Cycle;
  if (!isChain)
    PredDoneCycle += SU->Latency;
  else if (SU->Latency)
    PredDoneCycle += 1;
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, PredDoneCycle);

  if (SuccSU->NumPredsLeft == 0) {
    PendingQueue.push_back(SuccSU);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  SU->Cycle = CurCycle;

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, I->Dep, I->isCtrl);

  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
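/// Invariant: PendingQueue holds SUnits whose predecessors have all been
/// scheduled but whose latency has not yet elapsed; once an SUnit's
/// CycleBound is reached, it moves to the AvailableQueue.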
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority and schedule it. If no node is ready, advance the cycle.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->CycleBound == CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else {
        assert(PendingQueue[i]->CycleBound > CurCycle && "Negative latency?");
      }
    }

    // If there are no instructions available, don't try to issue anything.
    if (AvailableQueue.empty()) {
      ++CurCycle;
      continue;
    }

    SUnit *FoundSUnit = AvailableQueue.pop();

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else {
      // Otherwise, we have a pipeline stall, but no other problem; just
      // advance the current cycle and try again.
      DOUT << "*** Advancing cycle, no work to do\n";
      ++NumStalls;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}