//===-- ScheduleDAG.cpp - Implement a trivial DAG scheduler ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by Chris Lattner and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a simple two pass scheduler.  The first pass attempts to push
// backward any lengthy instructions and critical paths.  The second pass packs
// instructions into semi-optimal time slots.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include <iostream>
using namespace llvm;

namespace {
  // Style of scheduling to use.
  enum ScheduleChoices {
    noScheduling,
    simpleScheduling,
  };
} // namespace

cl::opt<ScheduleChoices> ScheduleStyle("sched",
  cl::desc("Choose scheduling style"),
  cl::init(noScheduling),
  cl::values(
    clEnumValN(noScheduling, "none",
               "Trivial emission with no analysis"),
    clEnumValN(simpleScheduling, "simple",
               "Minimize critical path and maximize processor utilization"),
    clEnumValEnd));
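
// The style is selected on the command line via the -sched flag defined above
// (for example, -sched=simple); with no flag the default is noScheduling, which
// simply emits the DAG in depth-first order from the root.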

#ifndef NDEBUG
static cl::opt<bool>
ViewDAGs("view-sched-dags", cl::Hidden,
         cl::desc("Pop up a window to show sched dags as they are processed"));
#else
static const bool ViewDAGs = 0;
#endif

namespace {
//===----------------------------------------------------------------------===//
///
/// BitsIterator - Provides iteration through individual bits in a bit vector.
///
template<class T>
class BitsIterator {
private:
  T Bits;                       // Bits left to iterate through

public:
  /// Ctor.
  BitsIterator(T Initial) : Bits(Initial) {}

  /// Next - Returns the next bit set or zero if exhausted.
  inline T Next() {
    // Get the rightmost bit set
    T Result = Bits & -Bits;
    // Remove from rest
    Bits &= ~Result;
    // Return single bit or zero
    return Result;
  }
};
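
// For example, BitsIterator<unsigned>(0x2C) returns 0x04, then 0x08, then 0x20
// from successive calls to Next(), and 0 once all bits have been consumed.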
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
///
/// ResourceTally - Manages the use of resources over time intervals.  Each
/// item (slot) in the tally vector represents the resources used at a given
/// moment.  A bit set to 1 indicates that a resource is in use; otherwise it is
/// available.  An assumption is made that the tally is large enough to schedule
/// all current instructions (asserts otherwise).
///
template<class T>
class ResourceTally {
private:
  std::vector<T> Tally;                 // Resources used per slot
  typedef typename std::vector<T>::iterator Iter;
                                        // Tally iterator

  /// AllInUse - Test to see if all of the resources in the slot are busy (set).
  inline bool AllInUse(Iter Cursor, unsigned ResourceSet) {
    return (*Cursor & ResourceSet) == ResourceSet;
  }

  /// Skip - Skip over slots that use all of the specified resources (all are
  /// set).
  Iter Skip(Iter Cursor, unsigned ResourceSet) {
    assert(ResourceSet && "At least one resource bit needs to be set");

    // Continue to the end
    while (true) {
      // Break out if one of the resource bits is not set
      if (!AllInUse(Cursor, ResourceSet)) return Cursor;
      // Try next slot
      Cursor++;
      assert(Cursor < Tally.end() && "Tally is not large enough for schedule");
    }
  }

  /// FindSlots - Starting from Begin, locate N consecutive slots where at least
  /// one of the resource bits is available.  Returns the address of the first
  /// slot.
  Iter FindSlots(Iter Begin, unsigned N, unsigned ResourceSet,
                 unsigned &Resource) {
    // Track position
    Iter Cursor = Begin;

    // Try all possible slots forward
    while (true) {
      // Skip full slots
      Cursor = Skip(Cursor, ResourceSet);
      // Determine end of interval
      Iter End = Cursor + N;
      assert(End <= Tally.end() && "Tally is not large enough for schedule");

      // Iterate through each resource
      BitsIterator<T> Resources(ResourceSet & ~*Cursor);
      while (unsigned Res = Resources.Next()) {
        // Check if resource is available for next N slots
        // Break out if resource is busy
        Iter Interval = Cursor;
        for (; Interval < End && !(*Interval & Res); Interval++) {}

        // If available for interval, return where and which resource
        if (Interval == End) {
          Resource = Res;
          return Cursor;
        }
        // Otherwise, check if it is worth checking other resources
        if (AllInUse(Interval, ResourceSet)) {
          // Start looking beyond interval
          Cursor = Interval;
          break;
        }
      }
      Cursor++;
    }
  }

  /// Reserve - Mark busy (set) the specified N slots.
  void Reserve(Iter Begin, unsigned N, unsigned Resource) {
    // Determine end of interval
    Iter End = Begin + N;
    assert(End <= Tally.end() && "Tally is not large enough for schedule");

    // Set resource bit in each slot
    for (; Begin < End; Begin++)
      *Begin |= Resource;
  }

public:
  /// Initialize - Resize and zero the tally to the specified number of time
  /// slots.
  inline void Initialize(unsigned N) {
    Tally.assign(N, 0);   // Initialize tally to all zeros.
  }

  // FindAndReserve - Locate and mark busy (set) a resource for N slots starting
  // at slot I, using ResourceSet for choices.
  unsigned FindAndReserve(unsigned I, unsigned N, unsigned ResourceSet) {
    // Which resource used
    unsigned Resource;
    // Find slots for instruction.
    Iter Where = FindSlots(Tally.begin() + I, N, ResourceSet, Resource);
    // Reserve the slots
    Reserve(Where, N, Resource);
    // Return time slot (index)
    return Where - Tally.begin();
  }
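
  // For example, given a tally of {0x1, 0x3, 0x1, 0x0}, FindAndReserve(0, 2, 0x3)
  // cannot place either bit across slots 0-1 (slot 1 has both bits busy), so it
  // reserves bit 0x2 in slots 2 and 3 and returns 2.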

};
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
// This struct tracks information used to schedule a node.
struct ScheduleInfo {
  SDOperand Op;                         // Operand information
  unsigned Latency;                     // Cycles to complete instruction
  unsigned ResourceSet;                 // Bit vector of usable resources
  bool IsBoundary;                      // Do not shift past this instruction.
  unsigned Slot;                        // Operand's time slot

  // Ctor.
  ScheduleInfo(SDOperand op)
  : Op(op)
  , Latency(0)
  , ResourceSet(0)
  , IsBoundary(false)
  , Slot(0)
  {}
};
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
class SimpleSched {
private:
  // TODO - get ResourceSet from TII
  enum {
    RSInteger   = 0x3,                  // Two integer units
    RSFloat     = 0xC,                  // Two float units
    RSLoadStore = 0x30,                 // Two load store units
    RSOther     = 0                     // Processing unit independent
  };

  MachineBasicBlock *BB;                // Current basic block
  SelectionDAG &DAG;                    // DAG of the current basic block
  const TargetMachine &TM;              // Target processor
  const TargetInstrInfo &TII;           // Target instruction information
  const MRegisterInfo &MRI;             // Target processor register information
  SSARegMap *RegMap;                    // Virtual/real register map
  MachineConstantPool *ConstPool;       // Target constant pool
  std::vector<ScheduleInfo> Operands;   // All operands to be scheduled
  std::vector<ScheduleInfo*> Ordering;  // Emit ordering of operands
  std::map<SDNode *, int> Visited;      // Operands that have been visited
  ResourceTally<unsigned> Tally;        // Resource usage tally
  unsigned NSlots;                      // Total latency
  std::map<SDNode *, unsigned> VRMap;   // Operand to VR map
  static const unsigned NotFound = ~0U; // Search marker

public:

  // Ctor.
  SimpleSched(SelectionDAG &D, MachineBasicBlock *bb)
    : BB(bb), DAG(D), TM(D.getTarget()), TII(*TM.getInstrInfo()),
      MRI(*TM.getRegisterInfo()), RegMap(BB->getParent()->getSSARegMap()),
      ConstPool(BB->getParent()->getConstantPool()),
      NSlots(0) {
    assert(&TII && "Target doesn't provide instr info?");
    assert(&MRI && "Target doesn't provide register info?");
  }

  // Run - perform scheduling.
  MachineBasicBlock *Run() {
    Schedule();
    return BB;
  }

private:
  static bool isFlagDefiner(SDOperand Op) { return isFlagDefiner(Op.Val); }
  static bool isFlagUser(SDOperand Op) { return isFlagUser(Op.Val); }
  static bool isFlagDefiner(SDNode *A);
  static bool isFlagUser(SDNode *A);
  static bool isDefiner(SDNode *A, SDNode *B);
  static bool isPassiveOperand(SDOperand Op);
  void IncludeOperand(SDOperand Op);
  void VisitAll();
  void Schedule();
  void GatherOperandInfo();
  bool isStrongDependency(SDOperand A, SDOperand B) {
    return isStrongDependency(A.Val, B.Val);
  }
  bool isWeakDependency(SDOperand A, SDOperand B) {
    return isWeakDependency(A.Val, B.Val);
  }
  static bool isStrongDependency(SDNode *A, SDNode *B);
  static bool isWeakDependency(SDNode *A, SDNode *B);
  void ScheduleBackward();
  void ScheduleForward();
  void EmitAll();
  void EmitFlagUsers(SDOperand Op);
  static unsigned CountResults(SDOperand Op);
  static unsigned CountOperands(SDOperand Op);
  unsigned CreateVirtualRegisters(SDOperand Op, MachineInstr *MI,
                                  unsigned NumResults,
                                  const TargetInstrDescriptor &II);
  unsigned Emit(SDOperand A);

  void printSI(std::ostream &O, ScheduleInfo *SI) const;
  void print(std::ostream &O) const;
  inline void dump(const char *tag) const { std::cerr << tag; dump(); }
  void dump() const;
};
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
class FlagUserIterator {
private:
  SDNode *Definer;                      // Node defining flag
  SDNode::use_iterator UI;              // User node iterator
  SDNode::use_iterator E;               // End of user nodes
  unsigned MinRes;                      // Minimum flag result

public:
  // Ctor.
  FlagUserIterator(SDNode *D)
  : Definer(D)
  , UI(D->use_begin())
  , E(D->use_end())
  , MinRes(D->getNumValues()) {
    // Find minimum flag result.
    while (MinRes && D->getValueType(MinRes - 1) == MVT::Flag) --MinRes;
  }

  /// isFlagUser - Return true if node uses definer's flag.
  bool isFlagUser(SDNode *U) {
    // For each operand (in reverse to only look at flags)
    for (unsigned N = U->getNumOperands(); 0 < N--;) {
      // Get operand
      SDOperand Op = U->getOperand(N);
      // Not a user if there are no flags
      if (Op.getValueType() != MVT::Flag) return false;
      // Return true if it is one of the flag results
      if (Op.Val == Definer && Op.ResNo >= MinRes) return true;
    }
    // Not a flag user
    return false;
  }
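
  // The scan runs from the end of the operand list because flag operands, when
  // present, always trail the other operands, so the walk can stop at the first
  // non-flag operand it encounters.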

  SDNode *next() {
    // Continue to next user
    while (UI != E) {
      // Next user node
      SDNode *User = *UI++;
      // Return the user if it is a flag user
      if (isFlagUser(User)) return User;
    }

    // No more user nodes
    return NULL;
  }
};

} // namespace


//===----------------------------------------------------------------------===//
/// isFlagDefiner - Returns true if the operand defines a flag result.
bool SimpleSched::isFlagDefiner(SDNode *A) {
  unsigned N = A->getNumValues();
  return N && A->getValueType(N - 1) == MVT::Flag;
}

/// isFlagUser - Returns true if the operand uses a flag result.
///
bool SimpleSched::isFlagUser(SDNode *A) {
  unsigned N = A->getNumOperands();
  return N && A->getOperand(N - 1).getValueType() == MVT::Flag;
}

/// isDefiner - Return true if node A is a definer for B.
///
bool SimpleSched::isDefiner(SDNode *A, SDNode *B) {
  for (unsigned i = 0, N = B->getNumOperands(); i < N; i++) {
    if (B->getOperand(i).Val == A) return true;
  }
  return false;
}

/// isPassiveOperand - Return true if the operand is a non-scheduled leaf
/// operand.
bool SimpleSched::isPassiveOperand(SDOperand Op) {
  if (isa<ConstantSDNode>(Op)) return true;
  if (isa<RegisterSDNode>(Op)) return true;
  if (isa<GlobalAddressSDNode>(Op)) return true;
  if (isa<BasicBlockSDNode>(Op)) return true;
  if (isa<FrameIndexSDNode>(Op)) return true;
  if (isa<ConstantPoolSDNode>(Op)) return true;
  if (isa<ExternalSymbolSDNode>(Op)) return true;
  return false;
}

/// IncludeOperand - Add an operand to the ScheduleInfo vector.
///
void SimpleSched::IncludeOperand(SDOperand Op) {
  // Ignore entry node
  if (Op.getOpcode() == ISD::EntryToken) return;
  // Check current count for operand
  int Count = Visited[Op.Val];
  // If the operand is already in the list
  if (Count < 0) return;
  // If this is the first time then get the count
  if (!Count) Count = Op.Val->use_size();
  // Decrement count to indicate a visit
  Count--;
  // If count has gone to zero then add operand to list
  if (!Count) {
    // Add operand
    Operands.push_back(ScheduleInfo(Op));
    // Indicate operand has been added
    Count--;
  }
  // Mark as visited with new count
  Visited[Op.Val] = Count;
}
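
// For instance, a node with three uses is only appended on the visit from its
// third user, after which Visited[node] is left at -1 so later visits are
// ignored.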

/// VisitAll - Visit each operand breadth-wise to produce an initial ordering.
/// Note that the ordering in the Operands vector is reversed.
void SimpleSched::VisitAll() {
  // Add first element to list
  Operands.push_back(DAG.getRoot());
  for (unsigned i = 0; i < Operands.size(); i++) { // note: size() varies
    // Get next operand.  Need a copy because the Operands vector is growing and
    // the addresses of its ScheduleInfo entries can change.
    SDOperand Op = Operands[i].Op;
    // Get the number of real operands
    unsigned NodeOperands = CountOperands(Op);
    // Get the total number of operands
    unsigned NumOperands = Op.getNumOperands();

    // Visit all operands, skipping the Other operand if present
    for (unsigned i = NumOperands; 0 < i--;) {
      SDOperand OpI = Op.getOperand(i);
      // Ignore passive operands
      if (isPassiveOperand(OpI)) continue;
      // Check out operand
      IncludeOperand(OpI);
    }
  }

  // Add entry node last (IncludeOperand filters entry nodes)
  if (DAG.getEntryNode().Val != DAG.getRoot().Val)
    Operands.push_back(DAG.getEntryNode());
}
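
// Because a node is appended only after each of its uses has been visited,
// Operands lists users before their definers; GatherOperandInfo walks the
// vector backward so definers precede users in the initial Ordering.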

/// GatherOperandInfo - Get latency and resource information about each operand.
///
void SimpleSched::GatherOperandInfo() {
  // Add addresses of operand info to ordering vector
  // Get number of operands
  unsigned N = Operands.size();
  // FIXME: This is an ugly (but temporary!) hack to test the scheduler before
  // we have real target info.

  // For each operand being scheduled
  for (unsigned i = 0; i < N; i++) {
    ScheduleInfo *SI = &Operands[N - i - 1];
    SDOperand Op = SI->Op;
    MVT::ValueType VT = Op.Val->getValueType(0);
    if (Op.isTargetOpcode()) {
      MachineOpCode TOpc = Op.getTargetOpcode();
      // FIXME SI->Latency = std::max(1, TII.maxLatency(TOpc));
      // FIXME SI->ResourceSet = TII.resources(TOpc);
      // There is a cost for keeping values across a call.
      SI->IsBoundary = TII.isCall(TOpc);

      if (TII.isLoad(TOpc)) {
        SI->ResourceSet = RSLoadStore;
        SI->Latency = 5;
      } else if (TII.isStore(TOpc)) {
        SI->ResourceSet = RSLoadStore;
        SI->Latency = 2;
      } else if (MVT::isInteger(VT)) {
        SI->ResourceSet = RSInteger;
        SI->Latency = 2;
      } else if (MVT::isFloatingPoint(VT)) {
        SI->ResourceSet = RSFloat;
        SI->Latency = 3;
      } else {
        SI->ResourceSet = RSOther;
        SI->Latency = 0;
      }
    } else {
      if (MVT::isInteger(VT)) {
        SI->ResourceSet = RSInteger;
        SI->Latency = 2;
      } else if (MVT::isFloatingPoint(VT)) {
        SI->ResourceSet = RSFloat;
        SI->Latency = 3;
      } else {
        SI->ResourceSet = RSOther;
        SI->Latency = 0;
      }
    }

    // Add one slot for the instruction itself
    SI->Latency++;

    // Sum up all the latencies for max tally size
    NSlots += SI->Latency;

    // Place in initial sorted order
    // FIXME - PUNT - ignore flag users
    if (!isFlagUser(Op)) Ordering.push_back(SI);
  }
}

/// isStrongDependency - Return true if operand A has results used by operand B.
/// I.e., B must wait for the latency of A.
bool SimpleSched::isStrongDependency(SDNode *A, SDNode *B) {
  // If A defines for B then it's a strong dependency
  if (isDefiner(A, B)) return true;
  // If A defines a flag then its users are part of the dependency
  if (isFlagDefiner(A)) {
    // Check each flag user
    FlagUserIterator FI(A);
    while (SDNode *User = FI.next()) {
      // If a flag user has a strong dependency, so does B
      if (isStrongDependency(User, B)) return true;
    }
  }
  // If B defines a flag then its users are part of the dependency
  if (isFlagDefiner(B)) {
    // Check each flag user
    FlagUserIterator FI(B);
    while (SDNode *User = FI.next()) {
      // If A strongly feeds a flag user, it also constrains B
      if (isStrongDependency(A, User)) return true;
    }
  }
  return false;
}

/// isWeakDependency - Return true if operand A produces a result that will
/// conflict with operands of B.
bool SimpleSched::isWeakDependency(SDNode *A, SDNode *B) {
  // TODO check for conflicting real registers and aliases
  return A->getOpcode() == ISD::EntryToken || isStrongDependency(B, A);
}

/// ScheduleBackward - Schedule instructions so that any long latency
/// instructions and the critical path get pushed back in time.  Time is run in
/// reverse to allow code reuse of the Tally and to eliminate the overhead of
/// biasing every slot index against NSlots.
void SimpleSched::ScheduleBackward() {
  // Size and clear the resource tally
  Tally.Initialize(NSlots);
  // Get number of operands to schedule
  unsigned N = Ordering.size();

  // For each operand being scheduled
  for (unsigned i = N; 0 < i--;) {
    ScheduleInfo *SI = Ordering[i];
    // Track insertion
    unsigned Slot = NotFound;

    // Compare against those previously scheduled operands
    for (unsigned j = i + 1; j < N; j++) {
      // Get following instruction
      ScheduleInfo *Other = Ordering[j];

      // Check dependency against previously inserted operands
      if (isStrongDependency(SI->Op, Other->Op)) {
        Slot = Other->Slot + Other->Latency;
        break;
      } else if (SI->IsBoundary || Other->IsBoundary ||
                 isWeakDependency(SI->Op, Other->Op)) {
        Slot = Other->Slot;
        break;
      }
    }

    // If independent of others (or first entry)
    if (Slot == NotFound) Slot = 0;

    // Find a slot where the needed resources are available
    if (SI->ResourceSet)
      Slot = Tally.FindAndReserve(Slot, SI->Latency, SI->ResourceSet);

    // Set operand slot
    SI->Slot = Slot;

    // Insertion sort based on slot
    unsigned j = i + 1;
    for (; j < N; j++) {
      // Get following instruction
      ScheduleInfo *Other = Ordering[j];
      // Should we look further?
      if (Slot >= Other->Slot) break;
      // Shuffle other into ordering
      Ordering[j - 1] = Other;
    }
    // Insert operand in proper slot
    if (j != i + 1) Ordering[j - 1] = SI;
  }
}
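
// Note that time runs in reverse here: slot 0 is the latest position, and
// increasing slots move instructions earlier.  The pass's real output is the
// re-sorted Ordering, which ScheduleForward then re-times with forward slots.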

/// ScheduleForward - Schedule instructions to maximize packing.
///
void SimpleSched::ScheduleForward() {
  // Size and clear the resource tally
  Tally.Initialize(NSlots);
  // Get number of operands to schedule
  unsigned N = Ordering.size();

  // For each operand being scheduled
  for (unsigned i = 0; i < N; i++) {
    ScheduleInfo *SI = Ordering[i];
    // Track insertion
    unsigned Slot = NotFound;

    // Compare against those previously scheduled operands
    for (unsigned j = i; 0 < j--;) {
      // Get preceding instruction
      ScheduleInfo *Other = Ordering[j];

      // Check dependency against previously inserted operands
      if (isStrongDependency(Other->Op, SI->Op)) {
        Slot = Other->Slot + Other->Latency;
        break;
      } else if (SI->IsBoundary || Other->IsBoundary ||
                 isWeakDependency(Other->Op, SI->Op)) {
        Slot = Other->Slot;
        break;
      }
    }

    // If independent of others (or first entry)
    if (Slot == NotFound) Slot = 0;

    // Find a slot where the needed resources are available
    if (SI->ResourceSet)
      Slot = Tally.FindAndReserve(Slot, SI->Latency, SI->ResourceSet);

    // Set operand slot
    SI->Slot = Slot;

    // Insertion sort based on slot
    unsigned j = i;
    for (; 0 < j--;) {
      // Get preceding instruction
      ScheduleInfo *Other = Ordering[j];
      // Should we look further?
      if (Slot >= Other->Slot) break;
      // Shuffle other into ordering
      Ordering[j + 1] = Other;
    }
    // Insert operand in proper slot
    if (j != i) Ordering[j + 1] = SI;
  }
}

/// EmitAll - Emit all operands in schedule sorted order.
///
void SimpleSched::EmitAll() {
  // For each operand in the ordering
  for (unsigned i = 0, N = Ordering.size(); i < N; i++) {
    // Get the scheduling info
    ScheduleInfo *SI = Ordering[i];
    // Get the operand
    SDOperand Op = SI->Op;
    // Emit the operand
    Emit(Op);
    // FIXME - PUNT - If Op defines a flag then its users need to be emitted now
    if (isFlagDefiner(Op)) EmitFlagUsers(Op);
  }
}

/// EmitFlagUsers - Emit users of the operand's flag.
///
void SimpleSched::EmitFlagUsers(SDOperand Op) {
  // Check each flag user
  FlagUserIterator FI(Op.Val);
  while (SDNode *User = FI.next()) {
    // Construct user node as operand
    SDOperand OpU(User, 0);
    // Emit user node
    Emit(OpU);
    // If the user defines a flag then its users need to be emitted now
    if (isFlagDefiner(User)) EmitFlagUsers(OpU);
  }
}

/// CountResults - The results of target nodes have register or immediate
/// operands first, then an optional chain, and optional flag operands (which do
/// not go into the machine instrs).
unsigned SimpleSched::CountResults(SDOperand Op) {
  unsigned N = Op.Val->getNumValues();
  while (N && Op.Val->getValueType(N - 1) == MVT::Flag)
    --N;
  if (N && Op.Val->getValueType(N - 1) == MVT::Other)
    --N;    // Skip over chain result.
  return N;
}
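
// For example, a target node whose value list is (i32, ch, flag) reports a
// single result here; the trailing chain and flag do not become machine
// operands.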

/// CountOperands - The inputs to target nodes have any actual inputs first,
/// followed by an optional chain operand, then flag operands.  Compute the
/// number of actual operands that will go into the machine instr.
unsigned SimpleSched::CountOperands(SDOperand Op) {
  unsigned N = Op.getNumOperands();
  while (N && Op.getOperand(N - 1).getValueType() == MVT::Flag)
    --N;
  if (N && Op.getOperand(N - 1).getValueType() == MVT::Other)
    --N;    // Ignore chain if it exists.
  return N;
}
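
// Likewise, a target node with the operand list (x, y, chain, flag) yields a
// count of two actual operands.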

/// CreateVirtualRegisters - Add result register values for things that are
/// defined by this instruction.
unsigned SimpleSched::CreateVirtualRegisters(SDOperand Op, MachineInstr *MI,
                                             unsigned NumResults,
                                             const TargetInstrDescriptor &II) {
  // Create the result registers for this node and add the result regs to
  // the machine instruction.
  const TargetOperandInfo *OpInfo = II.OpInfo;
  unsigned ResultReg = RegMap->createVirtualRegister(OpInfo[0].RegClass);
  MI->addRegOperand(ResultReg, MachineOperand::Def);
  for (unsigned i = 1; i != NumResults; ++i) {
    assert(OpInfo[i].RegClass && "Isn't a register operand!");
    MI->addRegOperand(RegMap->createVirtualRegister(OpInfo[i].RegClass),
                      MachineOperand::Def);
  }
  return ResultReg;
}

/// Emit - Generate machine code for an operand and needed dependencies.
///
unsigned SimpleSched::Emit(SDOperand Op) {
  std::map<SDNode *, unsigned>::iterator OpI = VRMap.lower_bound(Op.Val);
  if (OpI != VRMap.end() && OpI->first == Op.Val)
    return OpI->second + Op.ResNo;
  unsigned &OpSlot = VRMap.insert(OpI, std::make_pair(Op.Val, 0))->second;
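  // OpSlot points into VRMap; the entry is created here and filled in with the
  // base result register at the end of this function, so later Emit calls on
  // the same node take the early return above instead of emitting it twice.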

  unsigned ResultReg = 0;
  if (Op.isTargetOpcode()) {
    unsigned Opc = Op.getTargetOpcode();
    const TargetInstrDescriptor &II = TII.get(Opc);

    unsigned NumResults = CountResults(Op);
    unsigned NodeOperands = CountOperands(Op);
    unsigned NumMIOperands = NodeOperands + NumResults;
#ifndef NDEBUG
    assert((unsigned(II.numOperands) == NumMIOperands || II.numOperands == -1) &&
           "#operands for dag node doesn't match .td file!");
#endif

    // Create the new machine instruction.
    MachineInstr *MI = new MachineInstr(Opc, NumMIOperands, true, true);

    // Add result register values for things that are defined by this
    // instruction.
    if (NumResults) ResultReg = CreateVirtualRegisters(Op, MI, NumResults, II);

    // If there is a token chain operand, emit it first, as a hack to avoid
    // really bad cases.
    if (Op.getNumOperands() > NodeOperands &&
        Op.getOperand(NodeOperands).getValueType() == MVT::Other) {
      Emit(Op.getOperand(NodeOperands));
    }

    // Emit all of the actual operands of this instruction, adding them to the
    // instruction as appropriate.
    for (unsigned i = 0; i != NodeOperands; ++i) {
      if (Op.getOperand(i).isTargetOpcode()) {
        // Note that this case is redundant with the final else block, but we
        // include it because it is the most common and it makes the logic
        // simpler here.
        assert(Op.getOperand(i).getValueType() != MVT::Other &&
               Op.getOperand(i).getValueType() != MVT::Flag &&
               "Chain and flag operands should occur at end of operand list!");

        MI->addRegOperand(Emit(Op.getOperand(i)), MachineOperand::Use);
      } else if (ConstantSDNode *C =
                 dyn_cast<ConstantSDNode>(Op.getOperand(i))) {
        MI->addZeroExtImm64Operand(C->getValue());
      } else if (RegisterSDNode *R =
                 dyn_cast<RegisterSDNode>(Op.getOperand(i))) {
        MI->addRegOperand(R->getReg(), MachineOperand::Use);
      } else if (GlobalAddressSDNode *TGA =
                 dyn_cast<GlobalAddressSDNode>(Op.getOperand(i))) {
        MI->addGlobalAddressOperand(TGA->getGlobal(), false, 0);
      } else if (BasicBlockSDNode *BB =
                 dyn_cast<BasicBlockSDNode>(Op.getOperand(i))) {
        MI->addMachineBasicBlockOperand(BB->getBasicBlock());
      } else if (FrameIndexSDNode *FI =
                 dyn_cast<FrameIndexSDNode>(Op.getOperand(i))) {
        MI->addFrameIndexOperand(FI->getIndex());
      } else if (ConstantPoolSDNode *CP =
                 dyn_cast<ConstantPoolSDNode>(Op.getOperand(i))) {
        unsigned Idx = ConstPool->getConstantPoolIndex(CP->get());
        MI->addConstantPoolIndexOperand(Idx);
      } else if (ExternalSymbolSDNode *ES =
                 dyn_cast<ExternalSymbolSDNode>(Op.getOperand(i))) {
        MI->addExternalSymbolOperand(ES->getSymbol(), false);
      } else {
        assert(Op.getOperand(i).getValueType() != MVT::Other &&
               Op.getOperand(i).getValueType() != MVT::Flag &&
               "Chain and flag operands should occur at end of operand list!");
        MI->addRegOperand(Emit(Op.getOperand(i)), MachineOperand::Use);
      }
    }

    // Finally, if this node has any flag operands, we *must* emit them last, to
    // avoid emitting operations that might clobber the flags.
    if (Op.getNumOperands() > NodeOperands) {
      unsigned i = NodeOperands;
      if (Op.getOperand(i).getValueType() == MVT::Other)
        ++i;  // the chain is already selected.
      for (unsigned N = Op.getNumOperands(); i < N; i++) {
        assert(Op.getOperand(i).getValueType() == MVT::Flag &&
               "Must be flag operands!");
        Emit(Op.getOperand(i));
      }
    }

    // Now that we have emitted all operands, emit this instruction itself.
    if ((II.Flags & M_USES_CUSTOM_DAG_SCHED_INSERTION) == 0) {
      BB->insert(BB->end(), MI);
    } else {
      // Insert this instruction into the end of the basic block, potentially
      // taking some custom action.
      BB = DAG.getTargetLoweringInfo().InsertAtEndOfBasicBlock(MI, BB);
    }
  } else {
    switch (Op.getOpcode()) {
    default:
      Op.Val->dump();
      assert(0 && "This target-independent node should have been selected!");
    case ISD::EntryToken: break;
    case ISD::TokenFactor:
      for (unsigned i = 0, N = Op.getNumOperands(); i < N; i++) {
        Emit(Op.getOperand(i));
      }
      break;
    case ISD::CopyToReg: {
      SDOperand FlagOp;
      if (Op.getNumOperands() == 4) {
        FlagOp = Op.getOperand(3);
      }
      if (Op.getOperand(0).Val != FlagOp.Val) {
        Emit(Op.getOperand(0));   // Emit the chain.
      }
      unsigned Val = Emit(Op.getOperand(2));
      if (FlagOp.Val) {
        Emit(FlagOp);
      }
      MRI.copyRegToReg(*BB, BB->end(),
                       cast<RegisterSDNode>(Op.getOperand(1))->getReg(), Val,
                       RegMap->getRegClass(Val));
      break;
    }
    case ISD::CopyFromReg: {
      Emit(Op.getOperand(0));   // Emit the chain.
      unsigned SrcReg = cast<RegisterSDNode>(Op.getOperand(1))->getReg();

      // Figure out the register class to create for the destreg.
      const TargetRegisterClass *TRC = 0;
      if (MRegisterInfo::isVirtualRegister(SrcReg)) {
        TRC = RegMap->getRegClass(SrcReg);
      } else {
        // FIXME: we don't know what register class to generate this for.  Do
        // a brute force search and pick the first match. :(
        for (MRegisterInfo::regclass_iterator I = MRI.regclass_begin(),
             E = MRI.regclass_end(); I != E; ++I)
          if ((*I)->contains(SrcReg)) {
            TRC = *I;
            break;
          }
        assert(TRC && "Couldn't find register class for reg copy!");
      }

      // Create the reg, emit the copy.
      ResultReg = RegMap->createVirtualRegister(TRC);
      MRI.copyRegToReg(*BB, BB->end(), ResultReg, SrcReg, TRC);
      break;
    }
    }
  }

  OpSlot = ResultReg;
  return ResultReg + Op.ResNo;
}

/// Schedule - Order operands according to selected style.
///
void SimpleSched::Schedule() {
  switch (ScheduleStyle) {
  case simpleScheduling:
    // Breadth first walk of DAG
    VisitAll();
    // Get latency and resource requirements
    GatherOperandInfo();
    // Don't waste time if the block is only the entry and return
    if (Operands.size() > 2) {
      DEBUG(dump("Pre-"));
      // Push back long instructions and critical path
      ScheduleBackward();
      DEBUG(dump("Mid-"));
      // Pack instructions to maximize resource utilization
      ScheduleForward();
      DEBUG(dump("Post-"));
      // Emit in scheduled order
      EmitAll();
      break;
    } // fall thru
  case noScheduling:
    // Emit instructions using a DFS from the exit root
    Emit(DAG.getRoot());
    break;
  }
}

/// printSI - Print schedule info.
///
void SimpleSched::printSI(std::ostream &O, ScheduleInfo *SI) const {
#ifndef NDEBUG
  using namespace std;
  SDOperand Op = SI->Op;
  O << " "
    << hex << Op.Val
    << ", RS=" << SI->ResourceSet
    << ", Lat=" << SI->Latency
    << ", Slot=" << SI->Slot
    << ", ARITY=(" << Op.getNumOperands() << ","
    << Op.Val->getNumValues() << ")"
    << " " << Op.Val->getOperationName(&DAG);
  if (isFlagDefiner(Op)) O << "<#";
  if (isFlagUser(Op)) O << ">#";
#endif
}

/// print - Print ordering to specified output stream.
///
void SimpleSched::print(std::ostream &O) const {
#ifndef NDEBUG
  using namespace std;
  O << "Ordering\n";
  for (unsigned i = 0, N = Ordering.size(); i < N; i++) {
    printSI(O, Ordering[i]);
    O << "\n";
  }
#endif
}

/// dump - Print ordering to std::cerr.
///
void SimpleSched::dump() const {
  print(std::cerr);
}
//===----------------------------------------------------------------------===//


//===----------------------------------------------------------------------===//
/// ScheduleAndEmitDAG - Pick a safe ordering and emit instructions for each
/// target node in the graph.
void SelectionDAGISel::ScheduleAndEmitDAG(SelectionDAG &SD) {
  if (ViewDAGs) SD.viewGraph();
  BB = SimpleSched(SD, BB).Run();
}