//===---- ScheduleDAG.cpp - Implement the ScheduleDAG class ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include <climits>
using namespace llvm;

ScheduleDAG::ScheduleDAG(SelectionDAG *dag, MachineBasicBlock *bb,
                         const TargetMachine &tm)
  : DAG(dag), BB(bb), TM(tm), MRI(BB->getParent()->getRegInfo()) {
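  // Cache frequently used target hooks and per-function state.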
  TII = TM.getInstrInfo();
  MF = BB->getParent();
  TRI = TM.getRegisterInfo();
  TLI = TM.getTargetLowering();
  ConstPool = MF->getConstantPool();
}

ScheduleDAG::~ScheduleDAG() {}

/// CalculateDepths - Compute depths using a longest-path algorithm over
/// the DAG.
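///
/// Each SUnit's Depth becomes the length of the longest path from a node with
/// no predecessors to that SUnit. For example, in the chain A -> B -> C
/// (where A has no predecessors), the computed depths are A = 0, B = 1, C = 2.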
void ScheduleDAG::CalculateDepths() {
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  // Initialize the data structures.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    unsigned Degree = SU->Preds.size();
    // Temporarily use the Depth field as scratch space for the degree count.
    SU->Depth = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU->Preds.empty() && "SUnit should have no predecessors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  // Process nodes in topological order.
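  // A node enters the worklist only after every predecessor has had its depth
  // computed (the Depth field doubles as a remaining-predecessor counter), so
  // nodes are visited in a topological order of the DAG.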
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    unsigned SUDepth = 0;

    // Use dynamic programming:
    // When the current node is processed, all of its predecessors have
    // already been processed, so just iterate over all predecessors and
    // take the longest path.
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      unsigned PredDepth = I->Dep->Depth;
      if (PredDepth+1 > SUDepth) {
        SUDepth = PredDepth + 1;
      }
    }

    SU->Depth = SUDepth;

    // Update the degrees of all nodes depending on the current SUnit.
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      SUnit *SuccSU = I->Dep;
      if (!--SuccSU->Depth)
        // If all predecessors of the node have already been processed,
        // its longest path (depth) can be computed now.
        WorkList.push_back(SuccSU);
    }
  }
}

/// CalculateHeights - Compute heights using a longest-path algorithm over
/// the DAG.
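///
/// Each SUnit's Height becomes the length of the longest path from that SUnit
/// to a node with no successors. In the chain A -> B -> C (where C has no
/// successors), the computed heights are A = 2, B = 1, C = 0.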
void ScheduleDAG::CalculateHeights() {
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  // Initialize the data structures.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    unsigned Degree = SU->Succs.size();
    // Temporarily use the Height field as scratch space for the degree count.
    SU->Height = Degree;

    // Is it a node without successors?
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      assert(WorkList.empty() && "Expected only one node with no successors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  // Process nodes in topological order.
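  // Symmetric to CalculateDepths: the Height field doubles as a
  // remaining-successor counter, so nodes are visited in reverse topological
  // order, starting from nodes with no successors.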
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    unsigned SUHeight = 0;

    // Use dynamic programming:
    // When the current node is processed, all of its successors have
    // already been processed, so just iterate over all successors and
    // take the longest path.
    for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      unsigned SuccHeight = I->Dep->Height;
      if (SuccHeight+1 > SUHeight) {
        SUHeight = SuccHeight + 1;
      }
    }

    SU->Height = SUHeight;

    // Update the degrees of all nodes that the current SUnit depends on.
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      SUnit *PredSU = I->Dep;
      if (!--PredSU->Height)
        // If all successors of the node have already been processed,
        // its longest path (height) can be computed now.
        WorkList.push_back(PredSU);
    }
  }
}

/// dumpSchedule - dump the final schedule.
void ScheduleDAG::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      cerr << "**** NOOP ****\n";
  }
}

/// Run - perform scheduling.
///
void ScheduleDAG::Run() {
  Schedule();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";
}

/// SUnit - Scheduling unit. It is a wrapper around either a single SDNode or
/// a group of nodes flagged together.
void SUnit::dump(const ScheduleDAG *G) const {
  cerr << "SU(" << NodeNum << "): ";
  G->dumpNode(this);
}

void SUnit::dumpAll(const ScheduleDAG *G) const {
  dump(G);

  cerr << " # preds left : " << NumPredsLeft << "\n";
  cerr << " # succs left : " << NumSuccsLeft << "\n";
  cerr << " Latency : " << Latency << "\n";
  cerr << " Depth : " << Depth << "\n";
  cerr << " Height : " << Height << "\n";

  if (Preds.size() != 0) {
    cerr << " Predecessors:\n";
    for (SUnit::const_pred_iterator I = Preds.begin(), E = Preds.end();
         I != E; ++I) {
      if (I->isCtrl)
        cerr << " ch #";
      else
        cerr << " val #";
      cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
      if (I->isArtificial)
        cerr << " *";
      cerr << "\n";
    }
  }
  if (Succs.size() != 0) {
    cerr << " Successors:\n";
    for (SUnit::const_succ_iterator I = Succs.begin(), E = Succs.end();
         I != E; ++I) {
      if (I->isCtrl)
        cerr << " ch #";
      else
        cerr << " val #";
      cerr << I->Dep << " - SU(" << I->Dep->NodeNum << ")";
      if (I->isArtificial)
        cerr << " *";
      cerr << "\n";
    }
  }
  cerr << "\n";
}

#ifndef NDEBUG
/// VerifySchedule - Verify that all SUnits were scheduled and that
/// their state is consistent.
///
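/// Dead SUnits (those with no predecessors and no successors) and emitted
/// noops are accounted for when checking that every scheduled SUnit appears
/// in the final Sequence.
///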
void ScheduleDAG::VerifySchedule(bool isBottomUp) {
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  unsigned Noops = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        cerr << "*** Scheduling failed! ***\n";
      SUnits[i].dump(this);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].isScheduled && SUnits[i].Cycle > (unsigned)INT_MAX) {
      if (!AnyNotSched)
        cerr << "*** Scheduling failed! ***\n";
      SUnits[i].dump(this);
      cerr << "has an unexpected Cycle value!\n";
      AnyNotSched = true;
    }
    if (isBottomUp) {
      if (SUnits[i].NumSuccsLeft != 0) {
        if (!AnyNotSched)
          cerr << "*** Scheduling failed! ***\n";
        SUnits[i].dump(this);
        cerr << "has successors left!\n";
        AnyNotSched = true;
      }
    } else {
      if (SUnits[i].NumPredsLeft != 0) {
        if (!AnyNotSched)
          cerr << "*** Scheduling failed! ***\n";
        SUnits[i].dump(this);
        cerr << "has predecessors left!\n";
        AnyNotSched = true;
      }
    }
  }
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(!AnyNotSched);
  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
         "The number of nodes scheduled doesn't match the expected number!");
}
#endif