Evan Chengab495562006-01-25 09:14:32 +00001//===---- ScheduleDAGList.cpp - Implement a list scheduler for isel DAG ---===//
Evan Cheng31272342006-01-23 08:26:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file was developed by Evan Cheng and is distributed under the
6// University of Illinois Open Source License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
Chris Lattner01aa7522006-03-06 17:58:04 +000010// This implements bottom-up and top-down list schedulers, using standard
11// algorithms. The basic approach uses a priority queue of available nodes to
12// schedule. One at a time, nodes are taken from the priority queue (thus in
13// priority order), checked for legality to schedule, and emitted if legal.
14//
15// Nodes may not be legal to schedule either due to structural hazards (e.g.
16// pipeline or resource constraints) or because an input to the instruction has
17// not completed execution.
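//
// In outline, the scheduling loops below look roughly like this (a
// simplified sketch, not the exact control flow):
//
//   while (!AvailableQueue->empty()) {
//     SU = AvailableQueue->pop();          // highest-priority candidate
//     if (SU is not ready, or issuing it would cause a hazard)
//       set it aside (or emit a noop / advance the cycle);
//     else {
//       schedule SU at the current cycle;
//       release its predecessors (bottom-up) or successors (top-down),
//       pushing any that become ready back onto the available queue;
//     }
//   }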
Evan Cheng31272342006-01-23 08:26:10 +000018//
19//===----------------------------------------------------------------------===//
20
21#define DEBUG_TYPE "sched"
22#include "llvm/CodeGen/ScheduleDAG.h"
Evan Cheng9add8802006-05-04 19:16:39 +000023#include "llvm/CodeGen/SSARegMap.h"
24#include "llvm/Target/MRegisterInfo.h"
Evan Cheng31272342006-01-23 08:26:10 +000025#include "llvm/Target/TargetMachine.h"
26#include "llvm/Target/TargetInstrInfo.h"
Evan Chengab495562006-01-25 09:14:32 +000027#include "llvm/Support/Debug.h"
Chris Lattnerfa5e1c92006-03-05 23:13:56 +000028#include "llvm/ADT/Statistic.h"
Evan Chengab495562006-01-25 09:14:32 +000029#include <climits>
30#include <iostream>
Evan Cheng31272342006-01-23 08:26:10 +000031#include <queue>
Evan Cheng4e3904f2006-03-02 21:38:29 +000032#include <set>
33#include <vector>
Chris Lattnerd4130372006-03-09 07:15:18 +000034#include "llvm/Support/CommandLine.h"
Evan Cheng31272342006-01-23 08:26:10 +000035using namespace llvm;
36
Evan Chengab495562006-01-25 09:14:32 +000037namespace {
Evan Cheng7d693892006-05-09 07:13:34 +000038 cl::opt<bool> SchedVertically("sched-vertically", cl::Hidden);
39 cl::opt<bool> SchedLowerDefNUse("sched-lower-defnuse", cl::Hidden);
Evan Cheng9add8802006-05-04 19:16:39 +000040}
41
42namespace {
Chris Lattnerfa5e1c92006-03-05 23:13:56 +000043 Statistic<> NumNoops ("scheduler", "Number of noops inserted");
44 Statistic<> NumStalls("scheduler", "Number of pipeline stalls");
Evan Cheng31272342006-01-23 08:26:10 +000045
Chris Lattner12c6d892006-03-08 04:41:06 +000046 /// SUnit - Scheduling unit. It's a wrapper around either a single SDNode or
47 /// a group of nodes flagged together.
Chris Lattneraf5e26c2006-03-08 04:37:58 +000048 struct SUnit {
49 SDNode *Node; // Representative node.
50 std::vector<SDNode*> FlaggedNodes; // All nodes flagged to Node.
Chris Lattner578d8fc2006-03-11 22:24:20 +000051
52 // Preds/Succs - The SUnits before/after us in the graph. The boolean value
53 // is true if the edge is a token chain edge, false if it is a value edge.
54 std::set<std::pair<SUnit*,bool> > Preds; // All sunit predecessors.
55 std::set<std::pair<SUnit*,bool> > Succs; // All sunit successors.
56
Chris Lattner12c6d892006-03-08 04:41:06 +000057 short NumPredsLeft; // # of preds not scheduled.
58 short NumSuccsLeft; // # of succs not scheduled.
59 short NumChainPredsLeft; // # of chain preds not scheduled.
60 short NumChainSuccsLeft; // # of chain succs not scheduled.
Chris Lattner12c6d892006-03-08 04:41:06 +000061 bool isTwoAddress : 1; // Is a two-address instruction.
62 bool isDefNUseOperand : 1; // Is a def&use operand.
Chris Lattner572003c2006-03-12 00:38:57 +000063 bool isPending : 1; // True once pending.
Chris Lattner349e9dd2006-03-10 05:51:05 +000064 bool isAvailable : 1; // True once available.
65 bool isScheduled : 1; // True once scheduled.
Chris Lattner12c6d892006-03-08 04:41:06 +000066 unsigned short Latency; // Node latency.
Chris Lattneraf5e26c2006-03-08 04:37:58 +000067 unsigned CycleBound; // Upper/lower cycle to be scheduled at.
Chris Lattner356183d2006-03-11 22:44:37 +000068 unsigned Cycle; // Once scheduled, the cycle of the op.
Chris Lattnerfd22d422006-03-08 05:18:27 +000069 unsigned NodeNum; // Entry # of node in the node vector.
Chris Lattneraf5e26c2006-03-08 04:37:58 +000070
Chris Lattnerfd22d422006-03-08 05:18:27 +000071 SUnit(SDNode *node, unsigned nodenum)
Chris Lattneraf5e26c2006-03-08 04:37:58 +000072 : Node(node), NumPredsLeft(0), NumSuccsLeft(0),
Evan Cheng9add8802006-05-04 19:16:39 +000073 NumChainPredsLeft(0), NumChainSuccsLeft(0),
74 isTwoAddress(false), isDefNUseOperand(false),
75 isPending(false), isAvailable(false), isScheduled(false),
76 Latency(0), CycleBound(0), Cycle(0), NodeNum(nodenum) {}
Chris Lattneraf5e26c2006-03-08 04:37:58 +000077
Chris Lattnerd4130372006-03-09 07:15:18 +000078 void dump(const SelectionDAG *G) const;
79 void dumpAll(const SelectionDAG *G) const;
Chris Lattneraf5e26c2006-03-08 04:37:58 +000080 };
81}
Evan Chengab495562006-01-25 09:14:32 +000082
Chris Lattnerd4130372006-03-09 07:15:18 +000083void SUnit::dump(const SelectionDAG *G) const {
Evan Chengffef8b92006-05-03 02:10:45 +000084 std::cerr << "SU(" << NodeNum << "): ";
Evan Chengab495562006-01-25 09:14:32 +000085 Node->dump(G);
86 std::cerr << "\n";
Evan Chengab495562006-01-25 09:14:32 +000087 if (FlaggedNodes.size() != 0) {
Evan Chengab495562006-01-25 09:14:32 +000088 for (unsigned i = 0, e = FlaggedNodes.size(); i != e; i++) {
Evan Chengc4c339c2006-01-26 00:30:29 +000089 std::cerr << " ";
Evan Chengab495562006-01-25 09:14:32 +000090 FlaggedNodes[i]->dump(G);
91 std::cerr << "\n";
92 }
93 }
Chris Lattnerd4130372006-03-09 07:15:18 +000094}
Evan Chengab495562006-01-25 09:14:32 +000095
Chris Lattnerd4130372006-03-09 07:15:18 +000096void SUnit::dumpAll(const SelectionDAG *G) const {
97 dump(G);
Evan Chengc4c339c2006-01-26 00:30:29 +000098
Chris Lattnerd4130372006-03-09 07:15:18 +000099 std::cerr << " # preds left : " << NumPredsLeft << "\n";
100 std::cerr << " # succs left : " << NumSuccsLeft << "\n";
101 std::cerr << " # chain preds left : " << NumChainPredsLeft << "\n";
102 std::cerr << " # chain succs left : " << NumChainSuccsLeft << "\n";
103 std::cerr << " Latency : " << Latency << "\n";
104
105 if (Preds.size() != 0) {
106 std::cerr << " Predecessors:\n";
Chris Lattner578d8fc2006-03-11 22:24:20 +0000107 for (std::set<std::pair<SUnit*,bool> >::const_iterator I = Preds.begin(),
Chris Lattnerd4130372006-03-09 07:15:18 +0000108 E = Preds.end(); I != E; ++I) {
Chris Lattner578d8fc2006-03-11 22:24:20 +0000109 if (I->second)
110 std::cerr << " ch ";
111 else
112 std::cerr << " val ";
113 I->first->dump(G);
Chris Lattnerd4130372006-03-09 07:15:18 +0000114 }
115 }
116 if (Succs.size() != 0) {
117 std::cerr << " Successors:\n";
Chris Lattner578d8fc2006-03-11 22:24:20 +0000118 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = Succs.begin(),
Chris Lattnerd4130372006-03-09 07:15:18 +0000119 E = Succs.end(); I != E; ++I) {
Chris Lattner578d8fc2006-03-11 22:24:20 +0000120 if (I->second)
121 std::cerr << " ch ";
122 else
123 std::cerr << " val ";
124 I->first->dump(G);
Chris Lattnerd4130372006-03-09 07:15:18 +0000125 }
126 }
127 std::cerr << "\n";
Evan Chengab495562006-01-25 09:14:32 +0000128}
129
Chris Lattner9df64752006-03-09 06:35:14 +0000130//===----------------------------------------------------------------------===//
Chris Lattner9e95acc2006-03-09 06:37:29 +0000131/// SchedulingPriorityQueue - This interface is used to plug different
 132/// priority computation algorithms into the list scheduler. It implements the
133/// interface of a standard priority queue, where nodes are inserted in
134/// arbitrary order and returned in priority order. The computation of the
135/// priority and the representation of the queue are totally up to the
136/// implementation to decide.
137///
138namespace {
Chris Lattner9df64752006-03-09 06:35:14 +0000139class SchedulingPriorityQueue {
140public:
141 virtual ~SchedulingPriorityQueue() {}
Chris Lattnerfd22d422006-03-08 05:18:27 +0000142
Chris Lattner9df64752006-03-09 06:35:14 +0000143 virtual void initNodes(const std::vector<SUnit> &SUnits) = 0;
144 virtual void releaseState() = 0;
Chris Lattnerfd22d422006-03-08 05:18:27 +0000145
Chris Lattner9df64752006-03-09 06:35:14 +0000146 virtual bool empty() const = 0;
147 virtual void push(SUnit *U) = 0;
Chris Lattner25e25562006-03-10 04:32:49 +0000148
149 virtual void push_all(const std::vector<SUnit *> &Nodes) = 0;
Chris Lattner9df64752006-03-09 06:35:14 +0000150 virtual SUnit *pop() = 0;
Evan Cheng9add8802006-05-04 19:16:39 +0000151
152 virtual void RemoveFromPriorityQueue(SUnit *SU) = 0;
Chris Lattner25e25562006-03-10 04:32:49 +0000153
154 /// ScheduledNode - As each node is scheduled, this method is invoked. This
 155 /// allows the priority function to adjust the priority of nodes that have
156 /// already been emitted.
157 virtual void ScheduledNode(SUnit *Node) {}
Chris Lattner9df64752006-03-09 06:35:14 +0000158};
Chris Lattner9e95acc2006-03-09 06:37:29 +0000159}
Chris Lattnerfd22d422006-03-08 05:18:27 +0000160
161
Chris Lattnere50c0922006-03-05 22:45:01 +0000162
Chris Lattneraf5e26c2006-03-08 04:37:58 +0000163namespace {
Chris Lattner9e95acc2006-03-09 06:37:29 +0000164//===----------------------------------------------------------------------===//
165/// ScheduleDAGList - The actual list scheduler implementation. This supports
166/// both top-down and bottom-up scheduling.
167///
Evan Cheng31272342006-01-23 08:26:10 +0000168class ScheduleDAGList : public ScheduleDAG {
169private:
Evan Chengab495562006-01-25 09:14:32 +0000170 // SDNode to SUnit mapping (many to one).
171 std::map<SDNode*, SUnit*> SUnitMap;
Evan Cheng9add8802006-05-04 19:16:39 +0000172
Chris Lattner00b52ea2006-03-05 23:59:20 +0000173 // The schedule. Null SUnit*'s represent noop instructions.
Evan Chengab495562006-01-25 09:14:32 +0000174 std::vector<SUnit*> Sequence;
Chris Lattner42e20262006-03-08 04:54:34 +0000175
176 // The scheduling units.
177 std::vector<SUnit> SUnits;
Evan Cheng31272342006-01-23 08:26:10 +0000178
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000179 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
180 /// it is top-down.
181 bool isBottomUp;
182
Chris Lattner356183d2006-03-11 22:44:37 +0000183 /// AvailableQueue - The priority queue to use for the available SUnits.
184 ///
185 SchedulingPriorityQueue *AvailableQueue;
Chris Lattner9df64752006-03-09 06:35:14 +0000186
Chris Lattner572003c2006-03-12 00:38:57 +0000187 /// PendingQueue - This contains all of the instructions whose operands have
188 /// been issued, but their results are not ready yet (due to the latency of
189 /// the operation). Once the operands becomes available, the instruction is
190 /// added to the AvailableQueue. This keeps track of each SUnit and the
191 /// number of cycles left to execute before the operation is available.
192 std::vector<std::pair<unsigned, SUnit*> > PendingQueue;
Evan Cheng9add8802006-05-04 19:16:39 +0000193
Chris Lattnere50c0922006-03-05 22:45:01 +0000194 /// HazardRec - The hazard recognizer to use.
Chris Lattner543832d2006-03-08 04:25:59 +0000195 HazardRecognizer *HazardRec;
Evan Cheng9add8802006-05-04 19:16:39 +0000196
197 /// OpenNodes - Nodes with open live ranges, i.e. predecessors or successors
198 /// of scheduled nodes which are not themselves scheduled.
199 std::map<const TargetRegisterClass*, std::set<SUnit*> > OpenNodes;
200
Evan Cheng7d693892006-05-09 07:13:34 +0000201 /// RegPressureLimits - The upper limit of register pressure for each register
 202 /// class, below which the scheduler is allowed to go into vertical mode.
Evan Cheng9add8802006-05-04 19:16:39 +0000203 std::map<const TargetRegisterClass*, unsigned> RegPressureLimits;
204
Evan Cheng31272342006-01-23 08:26:10 +0000205public:
206 ScheduleDAGList(SelectionDAG &dag, MachineBasicBlock *bb,
Chris Lattnere50c0922006-03-05 22:45:01 +0000207 const TargetMachine &tm, bool isbottomup,
Chris Lattner356183d2006-03-11 22:44:37 +0000208 SchedulingPriorityQueue *availqueue,
Chris Lattner543832d2006-03-08 04:25:59 +0000209 HazardRecognizer *HR)
Chris Lattner063086b2006-03-11 22:34:41 +0000210 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
Chris Lattner356183d2006-03-11 22:44:37 +0000211 AvailableQueue(availqueue), HazardRec(HR) {
Chris Lattnere50c0922006-03-05 22:45:01 +0000212 }
Evan Chengab495562006-01-25 09:14:32 +0000213
214 ~ScheduleDAGList() {
Chris Lattner543832d2006-03-08 04:25:59 +0000215 delete HazardRec;
Chris Lattner356183d2006-03-11 22:44:37 +0000216 delete AvailableQueue;
Evan Chengab495562006-01-25 09:14:32 +0000217 }
Evan Cheng31272342006-01-23 08:26:10 +0000218
219 void Schedule();
Evan Cheng31272342006-01-23 08:26:10 +0000220
Chris Lattnerd4130372006-03-09 07:15:18 +0000221 void dumpSchedule() const;
Evan Chengab495562006-01-25 09:14:32 +0000222
223private:
Evan Chengc4c339c2006-01-26 00:30:29 +0000224 SUnit *NewSUnit(SDNode *N);
Chris Lattner063086b2006-03-11 22:34:41 +0000225 void ReleasePred(SUnit *PredSU, bool isChain, unsigned CurCycle);
Chris Lattner572003c2006-03-12 00:38:57 +0000226 void ReleaseSucc(SUnit *SuccSU, bool isChain);
Evan Cheng9add8802006-05-04 19:16:39 +0000227 void ScheduleNodeBottomUp(SUnit *SU, unsigned& CurCycle, bool Vertical=true);
228 void ScheduleVertically(SUnit *SU, unsigned& CurCycle);
Chris Lattner063086b2006-03-11 22:34:41 +0000229 void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
Chris Lattner399bee22006-03-09 06:48:37 +0000230 void ListScheduleTopDown();
231 void ListScheduleBottomUp();
Evan Chengab495562006-01-25 09:14:32 +0000232 void BuildSchedUnits();
233 void EmitSchedule();
234};
Chris Lattneraf5e26c2006-03-08 04:37:58 +0000235} // end anonymous namespace
Evan Chengab495562006-01-25 09:14:32 +0000236
Chris Lattner47639db2006-03-06 00:22:00 +0000237HazardRecognizer::~HazardRecognizer() {}
238
Evan Chengc4c339c2006-01-26 00:30:29 +0000239
240/// NewSUnit - Creates a new SUnit and return a ptr to it.
241SUnit *ScheduleDAGList::NewSUnit(SDNode *N) {
Chris Lattnerfd22d422006-03-08 05:18:27 +0000242 SUnits.push_back(SUnit(N, SUnits.size()));
Chris Lattner42e20262006-03-08 04:54:34 +0000243 return &SUnits.back();
Evan Chengc4c339c2006-01-26 00:30:29 +0000244}
245
Chris Lattner9995a0c2006-03-11 22:28:35 +0000246/// BuildSchedUnits - Build SUnits from the selection DAG we are given as input.
247/// This SUnit graph is similar to the SelectionDAG, but represents flagged
248/// together nodes with a single SUnit.
249void ScheduleDAGList::BuildSchedUnits() {
250 // Reserve entries in the vector for each of the SUnits we are creating. This
 251 // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
252 // invalidated.
253 SUnits.reserve(std::distance(DAG.allnodes_begin(), DAG.allnodes_end()));
254
255 const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
256
257 for (SelectionDAG::allnodes_iterator NI = DAG.allnodes_begin(),
258 E = DAG.allnodes_end(); NI != E; ++NI) {
259 if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
260 continue;
261
262 // If this node has already been processed, stop now.
263 if (SUnitMap[NI]) continue;
264
265 SUnit *NodeSUnit = NewSUnit(NI);
266
 267 // See if anything is flagged to this node; if so, add it to FlaggedNodes.
 268 // Nodes can have at most one flag input and one flag output. Flags
 269 // are required to be the last operand and result of a node.
270
271 // Scan up, adding flagged preds to FlaggedNodes.
272 SDNode *N = NI;
273 while (N->getNumOperands() &&
274 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
275 N = N->getOperand(N->getNumOperands()-1).Val;
276 NodeSUnit->FlaggedNodes.push_back(N);
277 SUnitMap[N] = NodeSUnit;
278 }
279
280 // Scan down, adding this node and any flagged succs to FlaggedNodes if they
281 // have a user of the flag operand.
282 N = NI;
283 while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
284 SDOperand FlagVal(N, N->getNumValues()-1);
285
286 // There are either zero or one users of the Flag result.
287 bool HasFlagUse = false;
288 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
289 UI != E; ++UI)
290 if (FlagVal.isOperand(*UI)) {
291 HasFlagUse = true;
292 NodeSUnit->FlaggedNodes.push_back(N);
293 SUnitMap[N] = NodeSUnit;
294 N = *UI;
295 break;
296 }
297 if (!HasFlagUse) break;
298 }
299
300 // Now all flagged nodes are in FlaggedNodes and N is the bottom-most node.
301 // Update the SUnit
302 NodeSUnit->Node = N;
303 SUnitMap[N] = NodeSUnit;
304
305 // Compute the latency for the node. We use the sum of the latencies for
306 // all nodes flagged together into this SUnit.
307 if (InstrItins.isEmpty()) {
308 // No latency information.
309 NodeSUnit->Latency = 1;
310 } else {
311 NodeSUnit->Latency = 0;
312 if (N->isTargetOpcode()) {
313 unsigned SchedClass = TII->getSchedClass(N->getTargetOpcode());
314 InstrStage *S = InstrItins.begin(SchedClass);
315 InstrStage *E = InstrItins.end(SchedClass);
316 for (; S != E; ++S)
317 NodeSUnit->Latency += S->Cycles;
318 }
319 for (unsigned i = 0, e = NodeSUnit->FlaggedNodes.size(); i != e; ++i) {
320 SDNode *FNode = NodeSUnit->FlaggedNodes[i];
321 if (FNode->isTargetOpcode()) {
322 unsigned SchedClass = TII->getSchedClass(FNode->getTargetOpcode());
323 InstrStage *S = InstrItins.begin(SchedClass);
324 InstrStage *E = InstrItins.end(SchedClass);
325 for (; S != E; ++S)
326 NodeSUnit->Latency += S->Cycles;
327 }
328 }
329 }
330 }
331
332 // Pass 2: add the preds, succs, etc.
333 for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
334 SUnit *SU = &SUnits[su];
335 SDNode *MainNode = SU->Node;
336
Evan Cheng24e79542006-05-01 09:14:40 +0000337 if (MainNode->isTargetOpcode()) {
338 unsigned Opc = MainNode->getTargetOpcode();
Evan Chengffef8b92006-05-03 02:10:45 +0000339 if (TII->isTwoAddrInstr(Opc)) {
Evan Cheng24e79542006-05-01 09:14:40 +0000340 SU->isTwoAddress = true;
Evan Chengffef8b92006-05-03 02:10:45 +0000341 SDNode *OpN = MainNode->getOperand(0).Val;
342 SUnit *OpSU = SUnitMap[OpN];
343 if (OpSU)
344 OpSU->isDefNUseOperand = true;
345 }
Evan Cheng24e79542006-05-01 09:14:40 +0000346 }
Chris Lattner9995a0c2006-03-11 22:28:35 +0000347
348 // Find all predecessors and successors of the group.
349 // Temporarily add N to make code simpler.
350 SU->FlaggedNodes.push_back(MainNode);
351
352 for (unsigned n = 0, e = SU->FlaggedNodes.size(); n != e; ++n) {
353 SDNode *N = SU->FlaggedNodes[n];
354
355 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
356 SDNode *OpN = N->getOperand(i).Val;
357 if (isPassiveNode(OpN)) continue; // Not scheduled.
358 SUnit *OpSU = SUnitMap[OpN];
359 assert(OpSU && "Node has no SUnit!");
360 if (OpSU == SU) continue; // In the same group.
Evan Chengffef8b92006-05-03 02:10:45 +0000361
Chris Lattner9995a0c2006-03-11 22:28:35 +0000362 MVT::ValueType OpVT = N->getOperand(i).getValueType();
363 assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
364 bool isChain = OpVT == MVT::Other;
365
366 if (SU->Preds.insert(std::make_pair(OpSU, isChain)).second) {
367 if (!isChain) {
368 SU->NumPredsLeft++;
369 } else {
370 SU->NumChainPredsLeft++;
371 }
372 }
373 if (OpSU->Succs.insert(std::make_pair(SU, isChain)).second) {
374 if (!isChain) {
375 OpSU->NumSuccsLeft++;
376 } else {
377 OpSU->NumChainSuccsLeft++;
378 }
379 }
380 }
381 }
382
383 // Remove MainNode from FlaggedNodes again.
384 SU->FlaggedNodes.pop_back();
385 }
Chris Lattnera767dbf2006-03-12 09:01:41 +0000386
Chris Lattner9995a0c2006-03-11 22:28:35 +0000387 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
388 SUnits[su].dumpAll(&DAG));
Evan Cheng24e79542006-05-01 09:14:40 +0000389 return;
Chris Lattner9995a0c2006-03-11 22:28:35 +0000390}
391
392/// EmitSchedule - Emit the machine code in scheduled order.
393void ScheduleDAGList::EmitSchedule() {
394 std::map<SDNode*, unsigned> VRBaseMap;
395 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
396 if (SUnit *SU = Sequence[i]) {
397 for (unsigned j = 0, ee = SU->FlaggedNodes.size(); j != ee; j++)
398 EmitNode(SU->FlaggedNodes[j], VRBaseMap);
399 EmitNode(SU->Node, VRBaseMap);
400 } else {
401 // Null SUnit* is a noop.
402 EmitNoop();
403 }
404 }
405}
406
 407/// dumpSchedule - Dump the schedule.
408void ScheduleDAGList::dumpSchedule() const {
409 for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
410 if (SUnit *SU = Sequence[i])
411 SU->dump(&DAG);
412 else
413 std::cerr << "**** NOOP ****\n";
414 }
415}
416
417/// Schedule - Schedule the DAG using list scheduling.
Chris Lattner9995a0c2006-03-11 22:28:35 +0000418void ScheduleDAGList::Schedule() {
419 DEBUG(std::cerr << "********** List Scheduling **********\n");
420
421 // Build scheduling units.
422 BuildSchedUnits();
Evan Cheng7d693892006-05-09 07:13:34 +0000423
Chris Lattner356183d2006-03-11 22:44:37 +0000424 AvailableQueue->initNodes(SUnits);
Chris Lattner9995a0c2006-03-11 22:28:35 +0000425
426 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
427 if (isBottomUp)
428 ListScheduleBottomUp();
429 else
430 ListScheduleTopDown();
431
Chris Lattner356183d2006-03-11 22:44:37 +0000432 AvailableQueue->releaseState();
Chris Lattner9995a0c2006-03-11 22:28:35 +0000433
434 DEBUG(std::cerr << "*** Final schedule ***\n");
435 DEBUG(dumpSchedule());
436 DEBUG(std::cerr << "\n");
437
438 // Emit in scheduled order
439 EmitSchedule();
440}
441
442//===----------------------------------------------------------------------===//
443// Bottom-Up Scheduling
444//===----------------------------------------------------------------------===//
445
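/// getRegClass - Return the register class of the value defined by SU: from
/// the instruction descriptor for target nodes, or from the source register
/// for CopyFromReg nodes.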
Evan Cheng9add8802006-05-04 19:16:39 +0000446static const TargetRegisterClass *getRegClass(SUnit *SU,
447 const TargetInstrInfo *TII,
448 const MRegisterInfo *MRI,
449 SSARegMap *RegMap) {
450 if (SU->Node->isTargetOpcode()) {
451 unsigned Opc = SU->Node->getTargetOpcode();
452 const TargetInstrDescriptor &II = TII->get(Opc);
453 return II.OpInfo->RegClass;
454 } else {
455 assert(SU->Node->getOpcode() == ISD::CopyFromReg);
456 unsigned SrcReg = cast<RegisterSDNode>(SU->Node->getOperand(1))->getReg();
457 if (MRegisterInfo::isVirtualRegister(SrcReg))
458 return RegMap->getRegClass(SrcReg);
459 else {
460 for (MRegisterInfo::regclass_iterator I = MRI->regclass_begin(),
461 E = MRI->regclass_end(); I != E; ++I)
462 if ((*I)->hasType(SU->Node->getValueType(0)) &&
463 (*I)->contains(SrcReg))
464 return *I;
465 assert(false && "Couldn't find register class for reg copy!");
466 }
467 return NULL;
468 }
469}
470
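/// getNumResults - Count the values produced by SU's representative node,
/// ignoring chain (MVT::Other) and flag (MVT::Flag) results.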
471static unsigned getNumResults(SUnit *SU) {
472 unsigned NumResults = 0;
473 for (unsigned i = 0, e = SU->Node->getNumValues(); i != e; ++i) {
474 MVT::ValueType VT = SU->Node->getValueType(i);
475 if (VT != MVT::Other && VT != MVT::Flag)
476 NumResults++;
477 }
478 return NumResults;
479}
480
Evan Chengc4c339c2006-01-26 00:30:29 +0000481/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
 482/// the Available queue if the count reaches zero. Also update its cycle bound.
Chris Lattner063086b2006-03-11 22:34:41 +0000483void ScheduleDAGList::ReleasePred(SUnit *PredSU, bool isChain,
Chris Lattner356183d2006-03-11 22:44:37 +0000484 unsigned CurCycle) {
Evan Cheng4e3904f2006-03-02 21:38:29 +0000485 // FIXME: the distance between two nodes is not always == the predecessor's
486 // latency. For example, the reader can very well read the register written
487 // by the predecessor later than the issue cycle. It also depends on the
488 // interrupt model (drain vs. freeze).
Chris Lattner356183d2006-03-11 22:44:37 +0000489 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
Evan Cheng4e3904f2006-03-02 21:38:29 +0000490
Evan Chengc5c06582006-03-06 06:08:54 +0000491 if (!isChain)
Evan Cheng4e3904f2006-03-02 21:38:29 +0000492 PredSU->NumSuccsLeft--;
Evan Chengc5c06582006-03-06 06:08:54 +0000493 else
Evan Cheng4e3904f2006-03-02 21:38:29 +0000494 PredSU->NumChainSuccsLeft--;
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000495
Evan Chengab495562006-01-25 09:14:32 +0000496#ifndef NDEBUG
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000497 if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
Evan Chengab495562006-01-25 09:14:32 +0000498 std::cerr << "*** List scheduling failed! ***\n";
499 PredSU->dump(&DAG);
500 std::cerr << " has been released too many times!\n";
501 assert(0);
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000502 }
Evan Chengab495562006-01-25 09:14:32 +0000503#endif
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000504
505 if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
506 // EntryToken has to go last! Special case it here.
Chris Lattner349e9dd2006-03-10 05:51:05 +0000507 if (PredSU->Node->getOpcode() != ISD::EntryToken) {
508 PredSU->isAvailable = true;
Chris Lattner356183d2006-03-11 22:44:37 +0000509 AvailableQueue->push(PredSU);
Chris Lattner349e9dd2006-03-10 05:51:05 +0000510 }
Evan Chengab495562006-01-25 09:14:32 +0000511 }
Evan Cheng9add8802006-05-04 19:16:39 +0000512
513 if (getNumResults(PredSU) > 0) {
514 const TargetRegisterClass *RegClass = getRegClass(PredSU, TII, MRI, RegMap);
515 OpenNodes[RegClass].insert(PredSU);
516 }
Evan Chengab495562006-01-25 09:14:32 +0000517}
Evan Cheng9add8802006-05-04 19:16:39 +0000518
 519/// SharesOperandWithTwoAddr - Check if there is an unscheduled two-address node
520/// with which SU shares an operand. If so, returns the node.
521static SUnit *SharesOperandWithTwoAddr(SUnit *SU) {
522 assert(!SU->isTwoAddress && "Node cannot be two-address op");
523 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
524 E = SU->Preds.end(); I != E; ++I) {
525 if (I->second) continue;
526 SUnit *PredSU = I->first;
527 for (std::set<std::pair<SUnit*, bool> >::iterator II =
528 PredSU->Succs.begin(), EE = PredSU->Succs.end(); II != EE; ++II) {
529 if (II->second) continue;
530 SUnit *SSU = II->first;
531 if (SSU->isTwoAddress && !SSU->isScheduled) {
532 return SSU;
533 }
534 }
535 }
536 return NULL;
537}
538
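/// isFloater - Return true if SU is not a CopyFromReg node and has no value
/// predecessors left, e.g. a load from a constant address.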
539static bool isFloater(const SUnit *SU) {
540 unsigned Opc = SU->Node->getOpcode();
541 return (Opc != ISD::CopyFromReg && SU->NumPredsLeft == 0);
542}
543
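/// isSimpleFloaterUse - Return true if SU has at most one value operand and
/// that operand, if present, is itself a floater.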
544static bool isSimpleFloaterUse(const SUnit *SU) {
545 unsigned NumOps = 0;
Jeff Cohen78a7f0e2006-05-05 01:47:05 +0000546 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Preds.begin(),
Evan Cheng9add8802006-05-04 19:16:39 +0000547 E = SU->Preds.end(); I != E; ++I) {
548 if (I->second) continue;
549 if (++NumOps > 1)
550 return false;
551 if (!isFloater(I->first))
552 return false;
553 }
554 return true;
555}
556
 557/// ScheduleVertically - Schedule vertically. That is, follow the def&use (D&U)
 558/// chain of two-address code upward and schedule floaters aggressively.
559void ScheduleDAGList::ScheduleVertically(SUnit *SU, unsigned& CurCycle) {
560 // Try scheduling Def&Use operand if register pressure is low.
561 const TargetRegisterClass *RegClass = getRegClass(SU, TII, MRI, RegMap);
562 unsigned Pressure = OpenNodes[RegClass].size();
563 unsigned Limit = RegPressureLimits[RegClass];
564
565 // See if we can schedule any predecessor that takes no registers.
566 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
567 E = SU->Preds.end(); I != E; ++I) {
568 if (I->second) continue;
569
570 SUnit *PredSU = I->first;
571 if (!PredSU->isAvailable || PredSU->isScheduled)
572 continue;
573
574 if (isFloater(PredSU)) {
575 DEBUG(std::cerr<<"*** Scheduling floater\n");
576 AvailableQueue->RemoveFromPriorityQueue(PredSU);
577 ScheduleNodeBottomUp(PredSU, CurCycle, false);
578 }
579 }
580
581 SUnit *DUSU = NULL;
582 if (SU->isTwoAddress && Pressure < Limit) {
583 DUSU = SUnitMap[SU->Node->getOperand(0).Val];
584 if (!DUSU->isAvailable || DUSU->isScheduled)
585 DUSU = NULL;
586 else if (!DUSU->isTwoAddress) {
587 SUnit *SSU = SharesOperandWithTwoAddr(DUSU);
588 if (SSU && SSU->isAvailable) {
589 AvailableQueue->RemoveFromPriorityQueue(SSU);
590 ScheduleNodeBottomUp(SSU, CurCycle, false);
591 Pressure = OpenNodes[RegClass].size();
592 if (Pressure >= Limit)
593 DUSU = NULL;
594 }
595 }
596 }
597
598 if (DUSU) {
599 DEBUG(std::cerr<<"*** Low register pressure: scheduling D&U operand\n");
600 AvailableQueue->RemoveFromPriorityQueue(DUSU);
601 ScheduleNodeBottomUp(DUSU, CurCycle, false);
602 Pressure = OpenNodes[RegClass].size();
603 ScheduleVertically(DUSU, CurCycle);
604 }
605}
606
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000607/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
608/// count of its predecessors. If a predecessor pending count is zero, add it to
609/// the Available queue.
Evan Cheng9add8802006-05-04 19:16:39 +0000610void ScheduleDAGList::ScheduleNodeBottomUp(SUnit *SU, unsigned& CurCycle,
611 bool Vertical) {
Chris Lattner572003c2006-03-12 00:38:57 +0000612 DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
Chris Lattnerd4130372006-03-09 07:15:18 +0000613 DEBUG(SU->dump(&DAG));
Chris Lattner356183d2006-03-11 22:44:37 +0000614 SU->Cycle = CurCycle;
Evan Cheng5e9a6952006-03-03 06:23:43 +0000615
Evan Chengffef8b92006-05-03 02:10:45 +0000616 AvailableQueue->ScheduledNode(SU);
Evan Chengab495562006-01-25 09:14:32 +0000617 Sequence.push_back(SU);
Evan Chengab495562006-01-25 09:14:32 +0000618
619 // Bottom up: release predecessors
Chris Lattner578d8fc2006-03-11 22:24:20 +0000620 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
Evan Cheng9add8802006-05-04 19:16:39 +0000621 E = SU->Preds.end(); I != E; ++I)
Chris Lattner356183d2006-03-11 22:44:37 +0000622 ReleasePred(I->first, I->second, CurCycle);
Evan Cheng9add8802006-05-04 19:16:39 +0000623 SU->isScheduled = true;
624 CurCycle++;
625
626 if (getNumResults(SU) != 0) {
627 const TargetRegisterClass *RegClass = getRegClass(SU, TII, MRI, RegMap);
628 OpenNodes[RegClass].erase(SU);
629
630 if (SchedVertically && Vertical)
631 ScheduleVertically(SU, CurCycle);
Evan Cheng4e3904f2006-03-02 21:38:29 +0000632 }
Evan Chengab495562006-01-25 09:14:32 +0000633}
634
 635/// isReady - True if the node's lower cycle bound is less than or equal to the current
636/// scheduling cycle. Always true if all nodes have uniform latency 1.
Evan Cheng9add8802006-05-04 19:16:39 +0000637static inline bool isReady(SUnit *SU, unsigned CurCycle) {
638 return SU->CycleBound <= CurCycle;
Evan Chengab495562006-01-25 09:14:32 +0000639}
640
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000641/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
642/// schedulers.
Chris Lattner399bee22006-03-09 06:48:37 +0000643void ScheduleDAGList::ListScheduleBottomUp() {
Evan Cheng9add8802006-05-04 19:16:39 +0000644 // Determine rough register pressure limit.
645 for (MRegisterInfo::regclass_iterator RCI = MRI->regclass_begin(),
646 E = MRI->regclass_end(); RCI != E; ++RCI) {
647 const TargetRegisterClass *RC = *RCI;
648 unsigned Limit = RC->getNumRegs();
649 Limit = (Limit > 2) ? Limit - 2 : 0;
650 std::map<const TargetRegisterClass*, unsigned>::iterator RPI =
651 RegPressureLimits.find(RC);
652 if (RPI == RegPressureLimits.end())
653 RegPressureLimits[RC] = Limit;
654 else {
655 unsigned &OldLimit = RegPressureLimits[RC];
656 if (Limit < OldLimit)
657 OldLimit = Limit;
658 }
659 }
660
661 unsigned CurCycle = 0;
Chris Lattner7a36d972006-03-05 20:21:55 +0000662 // Add root to Available queue.
Chris Lattner356183d2006-03-11 22:44:37 +0000663 AvailableQueue->push(SUnitMap[DAG.getRoot().Val]);
Evan Chengab495562006-01-25 09:14:32 +0000664
665 // While Available queue is not empty, grab the node with the highest
666 // priority. If it is not ready put it back. Schedule the node.
667 std::vector<SUnit*> NotReady;
Evan Cheng9add8802006-05-04 19:16:39 +0000668 SUnit *CurNode = NULL;
Chris Lattner356183d2006-03-11 22:44:37 +0000669 while (!AvailableQueue->empty()) {
Evan Cheng9add8802006-05-04 19:16:39 +0000670 CurNode = AvailableQueue->pop();
671 while (!isReady(CurNode, CurCycle)) {
672 NotReady.push_back(CurNode);
673 CurNode = AvailableQueue->pop();
Evan Chengab495562006-01-25 09:14:32 +0000674 }
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000675
676 // Add the nodes that aren't ready back onto the available list.
Chris Lattner356183d2006-03-11 22:44:37 +0000677 AvailableQueue->push_all(NotReady);
Chris Lattner25e25562006-03-10 04:32:49 +0000678 NotReady.clear();
Evan Chengab495562006-01-25 09:14:32 +0000679
Evan Cheng9add8802006-05-04 19:16:39 +0000680 ScheduleNodeBottomUp(CurNode, CurCycle);
Evan Chengab495562006-01-25 09:14:32 +0000681 }
682
683 // Add entry node last
684 if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
685 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
Evan Chengab495562006-01-25 09:14:32 +0000686 Sequence.push_back(Entry);
687 }
688
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000689 // Reverse the order since we built the schedule bottom-up.
690 std::reverse(Sequence.begin(), Sequence.end());
691
692
Evan Chengab495562006-01-25 09:14:32 +0000693#ifndef NDEBUG
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000694 // Verify that all SUnits were scheduled.
Evan Chengc4c339c2006-01-26 00:30:29 +0000695 bool AnyNotSched = false;
Chris Lattner42e20262006-03-08 04:54:34 +0000696 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
697 if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
Evan Chengc4c339c2006-01-26 00:30:29 +0000698 if (!AnyNotSched)
699 std::cerr << "*** List scheduling failed! ***\n";
Chris Lattner42e20262006-03-08 04:54:34 +0000700 SUnits[i].dump(&DAG);
Evan Chengc4c339c2006-01-26 00:30:29 +0000701 std::cerr << "has not been scheduled!\n";
702 AnyNotSched = true;
Evan Chengab495562006-01-25 09:14:32 +0000703 }
Evan Chengab495562006-01-25 09:14:32 +0000704 }
Evan Chengc4c339c2006-01-26 00:30:29 +0000705 assert(!AnyNotSched);
Reid Spencer5edde662006-01-25 21:49:13 +0000706#endif
Evan Chengab495562006-01-25 09:14:32 +0000707}
708
Chris Lattner9995a0c2006-03-11 22:28:35 +0000709//===----------------------------------------------------------------------===//
710// Top-Down Scheduling
711//===----------------------------------------------------------------------===//
712
713/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
Chris Lattner572003c2006-03-12 00:38:57 +0000714/// the PendingQueue if the count reaches zero.
715void ScheduleDAGList::ReleaseSucc(SUnit *SuccSU, bool isChain) {
Chris Lattner9995a0c2006-03-11 22:28:35 +0000716 if (!isChain)
717 SuccSU->NumPredsLeft--;
718 else
719 SuccSU->NumChainPredsLeft--;
720
Chris Lattner572003c2006-03-12 00:38:57 +0000721 assert(SuccSU->NumPredsLeft >= 0 && SuccSU->NumChainPredsLeft >= 0 &&
722 "List scheduling internal error");
Chris Lattner9995a0c2006-03-11 22:28:35 +0000723
724 if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
Chris Lattner572003c2006-03-12 00:38:57 +0000725 // Compute how many cycles it will be before this actually becomes
726 // available. This is the max of the start time of all predecessors plus
727 // their latencies.
728 unsigned AvailableCycle = 0;
729 for (std::set<std::pair<SUnit*, bool> >::iterator I = SuccSU->Preds.begin(),
730 E = SuccSU->Preds.end(); I != E; ++I) {
Chris Lattnera767dbf2006-03-12 09:01:41 +0000731 // If this is a token edge, we don't need to wait for the latency of the
 732 // preceding instruction (e.g. a long-latency load) unless there is also
733 // some other data dependence.
Chris Lattner86a9b602006-03-12 03:52:09 +0000734 unsigned PredDoneCycle = I->first->Cycle;
735 if (!I->second)
736 PredDoneCycle += I->first->Latency;
Chris Lattnera767dbf2006-03-12 09:01:41 +0000737 else if (I->first->Latency)
738 PredDoneCycle += 1;
Chris Lattner86a9b602006-03-12 03:52:09 +0000739
740 AvailableCycle = std::max(AvailableCycle, PredDoneCycle);
Chris Lattner572003c2006-03-12 00:38:57 +0000741 }
742
743 PendingQueue.push_back(std::make_pair(AvailableCycle, SuccSU));
Chris Lattner9995a0c2006-03-11 22:28:35 +0000744 }
745}
746
747/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
748/// count of its successors. If a successor pending count is zero, add it to
749/// the Available queue.
Chris Lattner356183d2006-03-11 22:44:37 +0000750void ScheduleDAGList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
Chris Lattner572003c2006-03-12 00:38:57 +0000751 DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
Chris Lattner9995a0c2006-03-11 22:28:35 +0000752 DEBUG(SU->dump(&DAG));
753
754 Sequence.push_back(SU);
Chris Lattner356183d2006-03-11 22:44:37 +0000755 SU->Cycle = CurCycle;
Chris Lattner9995a0c2006-03-11 22:28:35 +0000756
 757 // Top down: release successors.
758 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Succs.begin(),
Chris Lattner356183d2006-03-11 22:44:37 +0000759 E = SU->Succs.end(); I != E; ++I)
Chris Lattner572003c2006-03-12 00:38:57 +0000760 ReleaseSucc(I->first, I->second);
Chris Lattner9995a0c2006-03-11 22:28:35 +0000761}
762
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000763/// ListScheduleTopDown - The main loop of list scheduling for top-down
764/// schedulers.
Chris Lattner399bee22006-03-09 06:48:37 +0000765void ScheduleDAGList::ListScheduleTopDown() {
Chris Lattner572003c2006-03-12 00:38:57 +0000766 unsigned CurCycle = 0;
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000767 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
Chris Lattner572003c2006-03-12 00:38:57 +0000768
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000769 // Add all leaves to the Available queue.
Chris Lattner42e20262006-03-08 04:54:34 +0000770 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000771 // It is available if it has no predecessors.
Chris Lattner572003c2006-03-12 00:38:57 +0000772 if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
Chris Lattner356183d2006-03-11 22:44:37 +0000773 AvailableQueue->push(&SUnits[i]);
Chris Lattner572003c2006-03-12 00:38:57 +0000774 SUnits[i].isAvailable = SUnits[i].isPending = true;
775 }
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000776 }
777
Chris Lattner572003c2006-03-12 00:38:57 +0000778 // Emit the entry node first.
779 ScheduleNodeTopDown(Entry, CurCycle);
780 HazardRec->EmitInstruction(Entry->Node);
781
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000782 // While Available queue is not empty, grab the node with the highest
783 // priority. If it is not ready put it back. Schedule the node.
784 std::vector<SUnit*> NotReady;
Chris Lattner572003c2006-03-12 00:38:57 +0000785 while (!AvailableQueue->empty() || !PendingQueue.empty()) {
786 // Check to see if any of the pending instructions are ready to issue. If
787 // so, add them to the available queue.
Chris Lattnera767dbf2006-03-12 09:01:41 +0000788 for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
Chris Lattner572003c2006-03-12 00:38:57 +0000789 if (PendingQueue[i].first == CurCycle) {
790 AvailableQueue->push(PendingQueue[i].second);
791 PendingQueue[i].second->isAvailable = true;
792 PendingQueue[i] = PendingQueue.back();
793 PendingQueue.pop_back();
794 --i; --e;
795 } else {
796 assert(PendingQueue[i].first > CurCycle && "Negative latency?");
797 }
Chris Lattnera767dbf2006-03-12 09:01:41 +0000798 }
Chris Lattner572003c2006-03-12 00:38:57 +0000799
Chris Lattnera767dbf2006-03-12 09:01:41 +0000800 // If there are no instructions available, don't try to issue anything, and
801 // don't advance the hazard recognizer.
802 if (AvailableQueue->empty()) {
803 ++CurCycle;
804 continue;
805 }
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000806
Chris Lattnera767dbf2006-03-12 09:01:41 +0000807 SUnit *FoundSUnit = 0;
808 SDNode *FoundNode = 0;
809
Chris Lattnere50c0922006-03-05 22:45:01 +0000810 bool HasNoopHazards = false;
Chris Lattner572003c2006-03-12 00:38:57 +0000811 while (!AvailableQueue->empty()) {
Chris Lattnera767dbf2006-03-12 09:01:41 +0000812 SUnit *CurSUnit = AvailableQueue->pop();
Chris Lattner0c801bd2006-03-07 05:40:43 +0000813
814 // Get the node represented by this SUnit.
Chris Lattnera767dbf2006-03-12 09:01:41 +0000815 FoundNode = CurSUnit->Node;
816
Chris Lattner0c801bd2006-03-07 05:40:43 +0000817 // If this is a pseudo op, like copyfromreg, look to see if there is a
818 // real target node flagged to it. If so, use the target node.
Chris Lattnera767dbf2006-03-12 09:01:41 +0000819 for (unsigned i = 0, e = CurSUnit->FlaggedNodes.size();
820 FoundNode->getOpcode() < ISD::BUILTIN_OP_END && i != e; ++i)
821 FoundNode = CurSUnit->FlaggedNodes[i];
Chris Lattner0c801bd2006-03-07 05:40:43 +0000822
Chris Lattnera767dbf2006-03-12 09:01:41 +0000823 HazardRecognizer::HazardType HT = HazardRec->getHazardType(FoundNode);
Chris Lattnere50c0922006-03-05 22:45:01 +0000824 if (HT == HazardRecognizer::NoHazard) {
Chris Lattnera767dbf2006-03-12 09:01:41 +0000825 FoundSUnit = CurSUnit;
Chris Lattnere50c0922006-03-05 22:45:01 +0000826 break;
827 }
828
829 // Remember if this is a noop hazard.
830 HasNoopHazards |= HT == HazardRecognizer::NoopHazard;
831
Chris Lattnera767dbf2006-03-12 09:01:41 +0000832 NotReady.push_back(CurSUnit);
Chris Lattner572003c2006-03-12 00:38:57 +0000833 }
Chris Lattnere50c0922006-03-05 22:45:01 +0000834
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000835 // Add the nodes that aren't ready back onto the available list.
Chris Lattnera767dbf2006-03-12 09:01:41 +0000836 if (!NotReady.empty()) {
837 AvailableQueue->push_all(NotReady);
838 NotReady.clear();
839 }
Chris Lattnere50c0922006-03-05 22:45:01 +0000840
841 // If we found a node to schedule, do it now.
Chris Lattnera767dbf2006-03-12 09:01:41 +0000842 if (FoundSUnit) {
843 ScheduleNodeTopDown(FoundSUnit, CurCycle);
844 HazardRec->EmitInstruction(FoundNode);
845 FoundSUnit->isScheduled = true;
846 AvailableQueue->ScheduledNode(FoundSUnit);
Chris Lattner572003c2006-03-12 00:38:57 +0000847
848 // If this is a pseudo-op node, we don't want to increment the current
849 // cycle.
Chris Lattnera767dbf2006-03-12 09:01:41 +0000850 if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops!
851 ++CurCycle;
Chris Lattnere50c0922006-03-05 22:45:01 +0000852 } else if (!HasNoopHazards) {
 853 // Otherwise, we have a pipeline stall but no other problem; just advance
854 // the current cycle and try again.
Chris Lattner0c801bd2006-03-07 05:40:43 +0000855 DEBUG(std::cerr << "*** Advancing cycle, no work to do\n");
Chris Lattner543832d2006-03-08 04:25:59 +0000856 HazardRec->AdvanceCycle();
Chris Lattnerfa5e1c92006-03-05 23:13:56 +0000857 ++NumStalls;
Chris Lattnera767dbf2006-03-12 09:01:41 +0000858 ++CurCycle;
Chris Lattnere50c0922006-03-05 22:45:01 +0000859 } else {
860 // Otherwise, we have no instructions to issue and we have instructions
861 // that will fault if we don't do this right. This is the case for
862 // processors without pipeline interlocks and other cases.
Chris Lattner0c801bd2006-03-07 05:40:43 +0000863 DEBUG(std::cerr << "*** Emitting noop\n");
Chris Lattner543832d2006-03-08 04:25:59 +0000864 HazardRec->EmitNoop();
Chris Lattner00b52ea2006-03-05 23:59:20 +0000865 Sequence.push_back(0); // NULL SUnit* -> noop
Chris Lattnerfa5e1c92006-03-05 23:13:56 +0000866 ++NumNoops;
Chris Lattnera767dbf2006-03-12 09:01:41 +0000867 ++CurCycle;
Chris Lattnere50c0922006-03-05 22:45:01 +0000868 }
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000869 }
870
871#ifndef NDEBUG
872 // Verify that all SUnits were scheduled.
873 bool AnyNotSched = false;
Chris Lattner42e20262006-03-08 04:54:34 +0000874 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
875 if (SUnits[i].NumPredsLeft != 0 || SUnits[i].NumChainPredsLeft != 0) {
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000876 if (!AnyNotSched)
877 std::cerr << "*** List scheduling failed! ***\n";
Chris Lattner42e20262006-03-08 04:54:34 +0000878 SUnits[i].dump(&DAG);
Chris Lattner98ecb8e2006-03-05 21:10:33 +0000879 std::cerr << "has not been scheduled!\n";
880 AnyNotSched = true;
881 }
882 }
883 assert(!AnyNotSched);
884#endif
885}
886
Chris Lattner9df64752006-03-09 06:35:14 +0000887//===----------------------------------------------------------------------===//
888// RegReductionPriorityQueue Implementation
889//===----------------------------------------------------------------------===//
890//
891// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
892// to reduce register pressure.
893//
894namespace {
Evan Cheng9665ba02006-05-10 06:16:44 +0000895 template<class SF>
Chris Lattner9df64752006-03-09 06:35:14 +0000896 class RegReductionPriorityQueue;
897
898 /// Sorting functions for the Available queue.
899 struct ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
Evan Cheng9665ba02006-05-10 06:16:44 +0000900 RegReductionPriorityQueue<ls_rr_sort> *SPQ;
901 ls_rr_sort(RegReductionPriorityQueue<ls_rr_sort> *spq) : SPQ(spq) {}
Chris Lattner9df64752006-03-09 06:35:14 +0000902 ls_rr_sort(const ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
903
904 bool operator()(const SUnit* left, const SUnit* right) const;
905 };
906} // end anonymous namespace
907
908namespace {
Evan Cheng9665ba02006-05-10 06:16:44 +0000909 template<class SF>
Chris Lattner9df64752006-03-09 06:35:14 +0000910 class RegReductionPriorityQueue : public SchedulingPriorityQueue {
911 // SUnits - The SUnits for the current graph.
912 const std::vector<SUnit> *SUnits;
913
914 // SethiUllmanNumbers - The SethiUllman number for each node.
Evan Chengffef8b92006-05-03 02:10:45 +0000915 std::vector<int> SethiUllmanNumbers;
Chris Lattner9df64752006-03-09 06:35:14 +0000916
Evan Cheng9665ba02006-05-10 06:16:44 +0000917 std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
Chris Lattner9df64752006-03-09 06:35:14 +0000918 public:
Evan Chengffef8b92006-05-03 02:10:45 +0000919 RegReductionPriorityQueue() :
 920 Queue(SF(this)) {}
Chris Lattner9df64752006-03-09 06:35:14 +0000921
922 void initNodes(const std::vector<SUnit> &sunits) {
923 SUnits = &sunits;
Evan Cheng7d693892006-05-09 07:13:34 +0000924 // Add pseudo dependency edges for two-address nodes.
925 if (SchedLowerDefNUse)
926 AddPseudoTwoAddrDeps();
Chris Lattner9df64752006-03-09 06:35:14 +0000927 // Calculate node priorities.
928 CalculatePriorities();
929 }
930 void releaseState() {
931 SUnits = 0;
932 SethiUllmanNumbers.clear();
933 }
934
Evan Chengffef8b92006-05-03 02:10:45 +0000935 int getSethiUllmanNumber(unsigned NodeNum) const {
Chris Lattner9df64752006-03-09 06:35:14 +0000936 assert(NodeNum < SethiUllmanNumbers.size());
937 return SethiUllmanNumbers[NodeNum];
938 }
939
940 bool empty() const { return Queue.empty(); }
941
942 void push(SUnit *U) {
943 Queue.push(U);
944 }
Chris Lattner25e25562006-03-10 04:32:49 +0000945 void push_all(const std::vector<SUnit *> &Nodes) {
946 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
947 Queue.push(Nodes[i]);
948 }
949
Chris Lattner9df64752006-03-09 06:35:14 +0000950 SUnit *pop() {
951 SUnit *V = Queue.top();
952 Queue.pop();
953 return V;
954 }
Evan Chengffef8b92006-05-03 02:10:45 +0000955
Evan Cheng9add8802006-05-04 19:16:39 +0000956 /// RemoveFromPriorityQueue - This is a really inefficient way to remove a
957 /// node from a priority queue. We should roll our own heap to make this
958 /// better or something.
959 void RemoveFromPriorityQueue(SUnit *SU) {
960 std::vector<SUnit*> Temp;
961
962 assert(!Queue.empty() && "Not in queue!");
963 while (Queue.top() != SU) {
964 Temp.push_back(Queue.top());
965 Queue.pop();
966 assert(!Queue.empty() && "Not in queue!");
967 }
968
969 // Remove the node from the PQ.
970 Queue.pop();
971
972 // Add all the other nodes back.
973 for (unsigned i = 0, e = Temp.size(); i != e; ++i)
974 Queue.push(Temp[i]);
975 }
976
Chris Lattner9df64752006-03-09 06:35:14 +0000977 private:
Evan Cheng7d693892006-05-09 07:13:34 +0000978 void AddPseudoTwoAddrDeps();
Chris Lattner9df64752006-03-09 06:35:14 +0000979 void CalculatePriorities();
Evan Chengffef8b92006-05-03 02:10:45 +0000980 int CalcNodePriority(const SUnit *SU);
Chris Lattner9df64752006-03-09 06:35:14 +0000981 };
982}
983
984bool ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
985 unsigned LeftNum = left->NodeNum;
986 unsigned RightNum = right->NodeNum;
Evan Chengffef8b92006-05-03 02:10:45 +0000987 bool LIsTarget = left->Node->isTargetOpcode();
988 bool RIsTarget = right->Node->isTargetOpcode();
989 int LPriority = SPQ->getSethiUllmanNumber(LeftNum);
990 int RPriority = SPQ->getSethiUllmanNumber(RightNum);
991 bool LIsFloater = LIsTarget && (LPriority == 1 || LPriority == 0);
992 bool RIsFloater = RIsTarget && (RPriority == 1 || RPriority == 0);
Evan Cheng9add8802006-05-04 19:16:39 +0000993 int LBonus = 0;
994 int RBonus = 0;
Evan Cheng24e79542006-05-01 09:14:40 +0000995
Evan Cheng9add8802006-05-04 19:16:39 +0000996 // Schedule floaters (e.g. a load from some constant address) and nodes with
 997 // a single predecessor first, since they maintain or reduce register
 998 // pressure.
999 if (LIsFloater)
1000 LBonus += 2;
1001 if (RIsFloater)
1002 RBonus += 2;
Evan Cheng24e79542006-05-01 09:14:40 +00001003
Evan Cheng7d693892006-05-09 07:13:34 +00001004 if (!SchedLowerDefNUse) {
 1005 // Special tie breaker: if two nodes share an operand, the one that uses it
 1006 // as a def&use operand is preferred.
1007 if (LIsTarget && RIsTarget) {
1008 if (left->isTwoAddress && !right->isTwoAddress) {
1009 SDNode *DUNode = left->Node->getOperand(0).Val;
1010 if (DUNode->isOperand(right->Node))
1011 LBonus += 2;
1012 }
1013 if (!left->isTwoAddress && right->isTwoAddress) {
1014 SDNode *DUNode = right->Node->getOperand(0).Val;
1015 if (DUNode->isOperand(left->Node))
1016 RBonus += 2;
1017 }
Evan Chengffef8b92006-05-03 02:10:45 +00001018 }
1019 }
1020
Evan Cheng9add8802006-05-04 19:16:39 +00001021 if (LPriority+LBonus < RPriority+RBonus)
Chris Lattner9df64752006-03-09 06:35:14 +00001022 return true;
Evan Cheng9add8802006-05-04 19:16:39 +00001023 else if (LPriority+LBonus == RPriority+RBonus)
Evan Chengffef8b92006-05-03 02:10:45 +00001024 if (left->NumPredsLeft > right->NumPredsLeft)
Chris Lattner9df64752006-03-09 06:35:14 +00001025 return true;
Evan Cheng9add8802006-05-04 19:16:39 +00001026 else if (left->NumPredsLeft+LBonus == right->NumPredsLeft+RBonus)
Chris Lattner9df64752006-03-09 06:35:14 +00001027 if (left->CycleBound > right->CycleBound)
1028 return true;
Chris Lattner9df64752006-03-09 06:35:14 +00001029 return false;
1030}
1031
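/// isCopyFromLiveIn - Return true if SU is a CopyFromReg node whose source is
/// a live-in register, i.e. its last operand is not a flag.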
Evan Cheng7d693892006-05-09 07:13:34 +00001032static inline bool isCopyFromLiveIn(const SUnit *SU) {
1033 SDNode *N = SU->Node;
1034 return N->getOpcode() == ISD::CopyFromReg &&
1035 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1036}
1037
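/// isReachable - DFS over predecessor edges; sets Reached if TargetSU can be
/// reached from SU. Used to avoid introducing cycles when adding pseudo
/// two-address edges.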
1038// FIXME: This is probably too slow!
1039static void isReachable(SUnit *SU, SUnit *TargetSU,
1040 std::set<SUnit *> &Visited, bool &Reached) {
1041 if (Reached) return;
1042 if (SU == TargetSU) {
1043 Reached = true;
1044 return;
1045 }
1046 if (!Visited.insert(SU).second) return;
1047
1048 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
1049 E = SU->Preds.end(); I != E; ++I)
1050 isReachable(I->first, TargetSU, Visited, Reached);
1051}
1052
1053static bool isReachable(SUnit *SU, SUnit *TargetSU) {
1054 std::set<SUnit *> Visited;
1055 bool Reached = false;
1056 isReachable(SU, TargetSU, Visited, Reached);
1057 return Reached;
1058}
1059
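/// getDefUsePredecessor - Return the SUnit that produces the def&use operand
/// (operand 0) of the two-address node SU, or null if that operand is only
/// reachable through a flag edge.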
1060static SUnit *getDefUsePredecessor(SUnit *SU) {
1061 SDNode *DU = SU->Node->getOperand(0).Val;
1062 for (std::set<std::pair<SUnit*, bool> >::iterator
1063 I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
1064 if (I->second) continue; // ignore chain preds
1065 SUnit *PredSU = I->first;
1066 if (PredSU->Node == DU)
1067 return PredSU;
1068 }
1069
1070 // Must be flagged.
1071 return NULL;
1072}
1073
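/// canClobber - Return true if SU is a two-address node whose def&use operand
/// is produced by Op, i.e. SU will reuse (and overwrite) the register that Op
/// defines.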
1074static bool canClobber(SUnit *SU, SUnit *Op) {
1075 if (SU->isTwoAddress)
1076 return Op == getDefUsePredecessor(SU);
1077 return false;
1078}
1079
1080/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1081/// it as a def&use operand. Add a pseudo control edge from it to the other
1082/// node (if it won't create a cycle) so the two-address one will be scheduled
1083/// first (lower in the schedule).
Evan Cheng9665ba02006-05-10 06:16:44 +00001084template<class SF>
1085void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
Evan Cheng7d693892006-05-09 07:13:34 +00001086 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1087 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1088 SDNode *Node = SU->Node;
1089 if (!Node->isTargetOpcode())
1090 continue;
1091
1092 if (SU->isTwoAddress) {
1093 unsigned Depth = SU->Node->getNodeDepth();
1094 SUnit *DUSU = getDefUsePredecessor(SU);
1095 if (!DUSU) continue;
1096
1097 for (std::set<std::pair<SUnit*, bool> >::iterator I = DUSU->Succs.begin(),
1098 E = DUSU->Succs.end(); I != E; ++I) {
1099 SUnit *SuccSU = I->first;
1100 if (SuccSU != SU && !canClobber(SuccSU, DUSU)) {
1101 if (SuccSU->Node->getNodeDepth() <= Depth+2 &&
1102 !isReachable(SuccSU, SU)) {
1103 DEBUG(std::cerr << "Adding an edge from SU # " << SU->NodeNum
1104 << " to SU #" << SuccSU->NodeNum << "\n");
1105 if (SU->Preds.insert(std::make_pair(SuccSU, true)).second)
1106 SU->NumChainPredsLeft++;
1107 if (SuccSU->Succs.insert(std::make_pair(SU, true)).second)
1108 SuccSU->NumChainSuccsLeft++;
1109 }
1110 }
1111 }
1112 }
1113 }
1114}
Chris Lattner9df64752006-03-09 06:35:14 +00001115
1116/// CalcNodePriority - Priority is the Sethi Ullman number.
1117/// Smaller number is the higher priority.
Evan Cheng9665ba02006-05-10 06:16:44 +00001118template<class SF>
1119int RegReductionPriorityQueue<SF>::CalcNodePriority(const SUnit *SU) {
Evan Chengffef8b92006-05-03 02:10:45 +00001120 int &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
Evan Cheng24e79542006-05-01 09:14:40 +00001121 if (SethiUllmanNumber != 0)
Chris Lattner9df64752006-03-09 06:35:14 +00001122 return SethiUllmanNumber;
Evan Chengffef8b92006-05-03 02:10:45 +00001123
1124 unsigned Opc = SU->Node->getOpcode();
1125 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1126 SethiUllmanNumber = INT_MAX - 10;
1127 else if (SU->NumSuccsLeft == 0)
1128 // If SU does not have a use, i.e. it doesn't produce a value that would
1129 // be consumed (e.g. store), then it terminates a chain of computation.
1130 // Give it a small SethiUllman number so it will be scheduled right before its
 1131 // predecessors, so that it doesn't lengthen their live ranges.
1132 SethiUllmanNumber = INT_MIN + 10;
Evan Cheng7d693892006-05-09 07:13:34 +00001133 else if (SU->NumPredsLeft == 0 &&
1134 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
Chris Lattner9df64752006-03-09 06:35:14 +00001135 SethiUllmanNumber = 1;
Evan Chengffef8b92006-05-03 02:10:45 +00001136 else {
Chris Lattner9df64752006-03-09 06:35:14 +00001137 int Extra = 0;
Chris Lattner578d8fc2006-03-11 22:24:20 +00001138 for (std::set<std::pair<SUnit*, bool> >::const_iterator
1139 I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
Evan Chengffef8b92006-05-03 02:10:45 +00001140 if (I->second) continue; // ignore chain preds
Chris Lattner578d8fc2006-03-11 22:24:20 +00001141 SUnit *PredSU = I->first;
Evan Chengffef8b92006-05-03 02:10:45 +00001142 int PredSethiUllman = CalcNodePriority(PredSU);
Chris Lattner9df64752006-03-09 06:35:14 +00001143 if (PredSethiUllman > SethiUllmanNumber) {
1144 SethiUllmanNumber = PredSethiUllman;
1145 Extra = 0;
Evan Chengffef8b92006-05-03 02:10:45 +00001146 } else if (PredSethiUllman == SethiUllmanNumber && !I->second)
Chris Lattner9df64752006-03-09 06:35:14 +00001147 Extra++;
1148 }
Evan Chengffef8b92006-05-03 02:10:45 +00001149
Evan Cheng24e79542006-05-01 09:14:40 +00001150 SethiUllmanNumber += Extra;
Chris Lattner9df64752006-03-09 06:35:14 +00001151 }
1152
1153 return SethiUllmanNumber;
1154}
1155
1156/// CalculatePriorities - Calculate priorities of all scheduling units.
Evan Cheng9665ba02006-05-10 06:16:44 +00001157template<class SF>
1158void RegReductionPriorityQueue<SF>::CalculatePriorities() {
Evan Cheng24e79542006-05-01 09:14:40 +00001159 SethiUllmanNumbers.assign(SUnits->size(), 0);
Chris Lattner9df64752006-03-09 06:35:14 +00001160
1161 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1162 CalcNodePriority(&(*SUnits)[i]);
1163}
1164
Chris Lattner6398c132006-03-09 07:38:27 +00001165//===----------------------------------------------------------------------===//
1166// LatencyPriorityQueue Implementation
1167//===----------------------------------------------------------------------===//
1168//
1169// This is a SchedulingPriorityQueue that schedules using latency information to
1170// reduce the length of the critical path through the basic block.
1171//
1172namespace {
1173 class LatencyPriorityQueue;
1174
1175 /// Sorting functions for the Available queue.
1176 struct latency_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1177 LatencyPriorityQueue *PQ;
1178 latency_sort(LatencyPriorityQueue *pq) : PQ(pq) {}
1179 latency_sort(const latency_sort &RHS) : PQ(RHS.PQ) {}
1180
1181 bool operator()(const SUnit* left, const SUnit* right) const;
1182 };
1183} // end anonymous namespace
1184
1185namespace {
1186 class LatencyPriorityQueue : public SchedulingPriorityQueue {
1187 // SUnits - The SUnits for the current graph.
1188 const std::vector<SUnit> *SUnits;
1189
1190    // Latencies - For each node, the maximum latency along any path from that
1191    // node to the bb exit.
1192 std::vector<int> Latencies;
Chris Lattner349e9dd2006-03-10 05:51:05 +00001193
1194 /// NumNodesSolelyBlocking - This vector contains, for every node in the
1195 /// Queue, the number of nodes that the node is the sole unscheduled
1196 /// predecessor for. This is used as a tie-breaker heuristic for better
1197 /// mobility.
1198 std::vector<unsigned> NumNodesSolelyBlocking;
1199
Chris Lattner6398c132006-03-09 07:38:27 +00001200 std::priority_queue<SUnit*, std::vector<SUnit*>, latency_sort> Queue;
1201public:
1202 LatencyPriorityQueue() : Queue(latency_sort(this)) {
1203 }
1204
1205 void initNodes(const std::vector<SUnit> &sunits) {
1206 SUnits = &sunits;
1207 // Calculate node priorities.
1208 CalculatePriorities();
1209 }
1210 void releaseState() {
1211 SUnits = 0;
1212 Latencies.clear();
1213 }
1214
1215 unsigned getLatency(unsigned NodeNum) const {
1216 assert(NodeNum < Latencies.size());
1217 return Latencies[NodeNum];
1218 }
1219
Chris Lattner349e9dd2006-03-10 05:51:05 +00001220 unsigned getNumSolelyBlockNodes(unsigned NodeNum) const {
1221 assert(NodeNum < NumNodesSolelyBlocking.size());
1222 return NumNodesSolelyBlocking[NodeNum];
1223 }
1224
Chris Lattner6398c132006-03-09 07:38:27 +00001225 bool empty() const { return Queue.empty(); }
1226
Chris Lattner349e9dd2006-03-10 05:51:05 +00001227 virtual void push(SUnit *U) {
1228 push_impl(U);
Chris Lattner6398c132006-03-09 07:38:27 +00001229 }
Chris Lattner349e9dd2006-03-10 05:51:05 +00001230 void push_impl(SUnit *U);
1231
Chris Lattner25e25562006-03-10 04:32:49 +00001232 void push_all(const std::vector<SUnit *> &Nodes) {
1233 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
Chris Lattner349e9dd2006-03-10 05:51:05 +00001234 push_impl(Nodes[i]);
Chris Lattner25e25562006-03-10 04:32:49 +00001235 }
1236
Chris Lattner6398c132006-03-09 07:38:27 +00001237 SUnit *pop() {
1238 SUnit *V = Queue.top();
1239 Queue.pop();
Chris Lattner6398c132006-03-09 07:38:27 +00001240 return V;
1241 }
Evan Cheng7d693892006-05-09 07:13:34 +00001242
Chris Lattner349e9dd2006-03-10 05:51:05 +00001243    /// RemoveFromPriorityQueue - This is a really inefficient way to remove a
1244    /// node from a priority queue: pop nodes off until SU is found, then push
1245    /// the rest back on.  We should roll our own heap to make this better.
1246 void RemoveFromPriorityQueue(SUnit *SU) {
1247 std::vector<SUnit*> Temp;
1248
1249 assert(!Queue.empty() && "Not in queue!");
1250 while (Queue.top() != SU) {
1251 Temp.push_back(Queue.top());
1252 Queue.pop();
1253 assert(!Queue.empty() && "Not in queue!");
1254 }
1255
1256 // Remove the node from the PQ.
1257 Queue.pop();
1258
1259 // Add all the other nodes back.
1260 for (unsigned i = 0, e = Temp.size(); i != e; ++i)
1261 Queue.push(Temp[i]);
1262 }
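    // A cheaper removal would be possible if we owned the underlying
    // container.  Sketch only (assumes Queue were replaced by a
    // std::vector<SUnit*> kept heap-ordered with the same latency_sort
    // comparison, and that <algorithm> were included); this block is not
    // compiled:
#if 0
    void RemoveFromHeap(std::vector<SUnit*> &Heap, SUnit *SU) {
      // Find the victim, overwrite it with the last element, shrink the
      // vector, then rebuild the heap property: one linear scan plus an O(n)
      // make_heap instead of draining and refilling the whole queue.
      std::vector<SUnit*>::iterator I = std::find(Heap.begin(), Heap.end(), SU);
      assert(I != Heap.end() && "Not in heap!");
      *I = Heap.back();
      Heap.pop_back();
      std::make_heap(Heap.begin(), Heap.end(), latency_sort(this));
    }
#endif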
Evan Cheng9add8802006-05-04 19:16:39 +00001263
1264 // ScheduledNode - As nodes are scheduled, we look to see if there are any
1265 // successor nodes that have a single unscheduled predecessor. If so, that
1266 // single predecessor has a higher priority, since scheduling it will make
1267 // the node available.
1268 void ScheduledNode(SUnit *Node);
1269
1270private:
1271 void CalculatePriorities();
1272 int CalcLatency(const SUnit &SU);
1273 void AdjustPriorityOfUnscheduledPreds(SUnit *SU);
Chris Lattner6398c132006-03-09 07:38:27 +00001274 };
1275}
1276
1277bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
1278 unsigned LHSNum = LHS->NodeNum;
1279 unsigned RHSNum = RHS->NodeNum;
Chris Lattner349e9dd2006-03-10 05:51:05 +00001280
1281 // The most important heuristic is scheduling the critical path.
1282 unsigned LHSLatency = PQ->getLatency(LHSNum);
1283 unsigned RHSLatency = PQ->getLatency(RHSNum);
1284 if (LHSLatency < RHSLatency) return true;
1285 if (LHSLatency > RHSLatency) return false;
Chris Lattner6398c132006-03-09 07:38:27 +00001286
Chris Lattner349e9dd2006-03-10 05:51:05 +00001287  // After that, if two nodes have identical latencies, look to see if one will
1288  // unblock more nodes than the other.
1289 unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
1290 unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
1291 if (LHSBlocked < RHSBlocked) return true;
1292 if (LHSBlocked > RHSBlocked) return false;
1293
1294 // Finally, just to provide a stable ordering, use the node number as a
1295 // deciding factor.
1296 return LHSNum < RHSNum;
Chris Lattner6398c132006-03-09 07:38:27 +00001297}
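// Note on the ordering above (illustrative numbers): std::priority_queue pops
// its largest element first, and this predicate ranks a node with a smaller
// remaining latency below one with a larger remaining latency.  So if node A
// has latency-to-exit 5 and node B has 3, A is popped and scheduled before B,
// which is the "schedule the critical path first" behaviour described above.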
1298
1299
1300/// CalcLatency - Calculate the maximal path from the node to the exit.
1301///
1302int LatencyPriorityQueue::CalcLatency(const SUnit &SU) {
1303 int &Latency = Latencies[SU.NodeNum];
1304 if (Latency != -1)
1305 return Latency;
1306
1307 int MaxSuccLatency = 0;
Chris Lattner578d8fc2006-03-11 22:24:20 +00001308 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU.Succs.begin(),
Chris Lattner6398c132006-03-09 07:38:27 +00001309 E = SU.Succs.end(); I != E; ++I)
Chris Lattner578d8fc2006-03-11 22:24:20 +00001310 MaxSuccLatency = std::max(MaxSuccLatency, CalcLatency(*I->first));
Chris Lattner6398c132006-03-09 07:38:27 +00001311
1312 return Latency = MaxSuccLatency + SU.Latency;
1313}
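// Worked example (made-up latencies): for a two-node chain X -> Y where Y has
// no successors, Y.Latency == 3 and X.Latency == 2, CalcLatency(Y) yields
// 0 + 3 == 3 and CalcLatency(X) yields 3 + 2 == 5; each entry in Latencies is
// therefore the length of the longest path from that node to the block exit.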
1314
1315/// CalculatePriorities - Calculate priorities of all scheduling units.
1316void LatencyPriorityQueue::CalculatePriorities() {
1317 Latencies.assign(SUnits->size(), -1);
Chris Lattner349e9dd2006-03-10 05:51:05 +00001318 NumNodesSolelyBlocking.assign(SUnits->size(), 0);
Chris Lattner6398c132006-03-09 07:38:27 +00001319
1320 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1321 CalcLatency((*SUnits)[i]);
1322}
1323
Chris Lattner349e9dd2006-03-10 05:51:05 +00001324/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
1325/// of SU, return it, otherwise return null.
1326static SUnit *getSingleUnscheduledPred(SUnit *SU) {
1327 SUnit *OnlyAvailablePred = 0;
Chris Lattner578d8fc2006-03-11 22:24:20 +00001328 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Preds.begin(),
Chris Lattner349e9dd2006-03-10 05:51:05 +00001329 E = SU->Preds.end(); I != E; ++I)
Chris Lattner578d8fc2006-03-11 22:24:20 +00001330 if (!I->first->isScheduled) {
Chris Lattner349e9dd2006-03-10 05:51:05 +00001331 // We found an available, but not scheduled, predecessor. If it's the
1332 // only one we have found, keep track of it... otherwise give up.
Chris Lattner578d8fc2006-03-11 22:24:20 +00001333 if (OnlyAvailablePred && OnlyAvailablePred != I->first)
Chris Lattner349e9dd2006-03-10 05:51:05 +00001334 return 0;
Chris Lattner578d8fc2006-03-11 22:24:20 +00001335 OnlyAvailablePred = I->first;
Chris Lattner349e9dd2006-03-10 05:51:05 +00001336 }
1337
1338 return OnlyAvailablePred;
1339}
1340
1341void LatencyPriorityQueue::push_impl(SUnit *SU) {
1342  // Look at all of the successors of this node.  Count the number of them for
1343  // which this node is the sole unscheduled predecessor.
1344 unsigned NumNodesBlocking = 0;
Chris Lattner578d8fc2006-03-11 22:24:20 +00001345 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Succs.begin(),
Chris Lattner349e9dd2006-03-10 05:51:05 +00001346 E = SU->Succs.end(); I != E; ++I)
Chris Lattner578d8fc2006-03-11 22:24:20 +00001347 if (getSingleUnscheduledPred(I->first) == SU)
Chris Lattner349e9dd2006-03-10 05:51:05 +00001348 ++NumNodesBlocking;
Chris Lattner578d8fc2006-03-11 22:24:20 +00001349 NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
Chris Lattner349e9dd2006-03-10 05:51:05 +00001350
1351 Queue.push(SU);
1352}
1353
1354
1355// ScheduledNode - As nodes are scheduled, we look to see if there are any
1356// successor nodes that have a single unscheduled predecessor. If so, that
1357// single predecessor has a higher priority, since scheduling it will make
1358// the node available.
1359void LatencyPriorityQueue::ScheduledNode(SUnit *SU) {
Chris Lattner578d8fc2006-03-11 22:24:20 +00001360 for (std::set<std::pair<SUnit*, bool> >::const_iterator I = SU->Succs.begin(),
Chris Lattner349e9dd2006-03-10 05:51:05 +00001361 E = SU->Succs.end(); I != E; ++I)
Chris Lattner578d8fc2006-03-11 22:24:20 +00001362 AdjustPriorityOfUnscheduledPreds(I->first);
Chris Lattner349e9dd2006-03-10 05:51:05 +00001363}
1364
1365/// AdjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
1366/// scheduled. If SU is not itself available, then there is at least one
1367/// predecessor node that has not been scheduled yet. If SU has exactly ONE
1368/// unscheduled predecessor, we want to increase that predecessor's priority:
1369/// scheduling it will make this node available, so it is better than some
1370/// other node of the same priority that will not make a node available.
1371void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
Chris Lattner572003c2006-03-12 00:38:57 +00001372 if (SU->isPending) return; // All preds scheduled.
Chris Lattner349e9dd2006-03-10 05:51:05 +00001373
1374 SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
1375 if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;
1376
1377 // Okay, we found a single predecessor that is available, but not scheduled.
1378 // Since it is available, it must be in the priority queue. First remove it.
1379 RemoveFromPriorityQueue(OnlyAvailablePred);
1380
1381 // Reinsert the node into the priority queue, which recomputes its
1382 // NumNodesSolelyBlocking value.
1383 push(OnlyAvailablePred);
1384}
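// Illustrative walk-through (hypothetical nodes): let C have predecessors A
// and B, where A was just scheduled and B is available but still queued.
// ScheduledNode(A) visits its successor C; getSingleUnscheduledPred(C)
// returns B, so B is removed from the queue and re-pushed, which refreshes
// its NumNodesSolelyBlocking count (now including C) and lets it win the
// tie-break against otherwise equal-latency nodes.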
1385
Chris Lattner9df64752006-03-09 06:35:14 +00001386
1387//===----------------------------------------------------------------------===//
1388// Public Constructor Functions
1389//===----------------------------------------------------------------------===//
1390
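/// createBURRListDAGScheduler - This creates a bottom-up register-reduction
/// list scheduler driven by the RegReductionPriorityQueue above and a default
/// HazardRecognizer.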
Evan Chengab495562006-01-25 09:14:32 +00001391llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAG &DAG,
1392 MachineBasicBlock *BB) {
Chris Lattner543832d2006-03-08 04:25:59 +00001393 return new ScheduleDAGList(DAG, BB, DAG.getTarget(), true,
Evan Cheng9665ba02006-05-10 06:16:44 +00001394 new RegReductionPriorityQueue<ls_rr_sort>(),
Chris Lattner543832d2006-03-08 04:25:59 +00001395 new HazardRecognizer());
Chris Lattner98ecb8e2006-03-05 21:10:33 +00001396}
1397
Chris Lattner47639db2006-03-06 00:22:00 +00001398/// createTDListDAGScheduler - This creates a top-down list scheduler with the
1399/// specified hazard recognizer.
1400ScheduleDAG* llvm::createTDListDAGScheduler(SelectionDAG &DAG,
1401 MachineBasicBlock *BB,
Chris Lattner543832d2006-03-08 04:25:59 +00001402 HazardRecognizer *HR) {
Chris Lattner9df64752006-03-09 06:35:14 +00001403 return new ScheduleDAGList(DAG, BB, DAG.getTarget(), false,
Chris Lattner6398c132006-03-09 07:38:27 +00001404 new LatencyPriorityQueue(),
Chris Lattner9df64752006-03-09 06:35:14 +00001405 HR);
Evan Cheng31272342006-01-23 08:26:10 +00001406}
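// A minimal usage sketch (hypothetical caller; the real driver lives in the
// instruction selector, not in this file):
//
//   ScheduleDAG *SD = createTDListDAGScheduler(DAG, BB, TargetHR);
//   // ... run the scheduler to reorder and emit this block ...
//   delete SD;
//
// where DAG and BB are the current SelectionDAG and MachineBasicBlock, and
// TargetHR is whatever HazardRecognizer the target supplies.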