Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1 | //===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
Chris Lattner | f3ebc3f | 2007-12-29 20:36:04 +0000 | [diff] [blame] | 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | // |
| 10 | // This implements bottom-up and top-down register pressure reduction list |
| 11 | // schedulers, using standard algorithms. The basic approach uses a priority |
| 12 | // queue of available nodes to schedule. One at a time, nodes are taken from |
| 13 | // the priority queue (thus in priority order), checked for legality to |
| 14 | // schedule, and emitted if legal. |
| 15 | // |
| 16 | //===----------------------------------------------------------------------===// |
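//
// As a rough illustration only (a simplified sketch, not the actual code in
// this file; isReady and ScheduleNode are hypothetical stand-ins for the
// legality checks and bookkeeping done by ListScheduleBottomUp /
// ListScheduleTopDown below), the core list-scheduling loop looks like this:
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();  // highest-priority available node
//     if (!isReady(SU, CurCycle))         // e.g. its cycle bound not reached
//       NotReady.push_back(SU);           // defer it and try the next one
//     else
//       ScheduleNode(SU, CurCycle);       // emit it and release its neighbors
//   }
//   // Deferred nodes are pushed back into the queue before the next cycle.
//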
| 17 | |
Dale Johannesen | 2182f06 | 2007-07-13 17:13:54 +0000 | [diff] [blame] | 18 | #define DEBUG_TYPE "pre-RA-sched" |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 19 | #include "llvm/CodeGen/ScheduleDAG.h" |
Jim Laskey | 29e635d | 2006-08-02 12:30:23 +0000 | [diff] [blame] | 20 | #include "llvm/CodeGen/SchedulerRegistry.h" |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 21 | #include "llvm/Target/TargetRegisterInfo.h" |
Owen Anderson | 8c2c1e9 | 2006-05-12 06:33:49 +0000 | [diff] [blame] | 22 | #include "llvm/Target/TargetData.h" |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 23 | #include "llvm/Target/TargetMachine.h" |
| 24 | #include "llvm/Target/TargetInstrInfo.h" |
| 25 | #include "llvm/Support/Debug.h" |
Chris Lattner | 3d27be1 | 2006-08-27 12:54:02 +0000 | [diff] [blame] | 26 | #include "llvm/Support/Compiler.h" |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 27 | #include "llvm/ADT/SmallPtrSet.h" |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 28 | #include "llvm/ADT/SmallSet.h" |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 29 | #include "llvm/ADT/Statistic.h" |
| 30 | #include <climits> |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 31 | #include <queue> |
| 32 | #include "llvm/Support/CommandLine.h" |
| 33 | using namespace llvm; |
| 34 | |
Dan Gohman | fd227e9 | 2008-03-25 17:10:29 +0000 | [diff] [blame] | 35 | STATISTIC(NumBacktracks, "Number of times scheduler backtracked"); |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 36 | STATISTIC(NumUnfolds, "Number of nodes unfolded"); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 37 | STATISTIC(NumDups, "Number of duplicated nodes"); |
| 38 | STATISTIC(NumCCCopies, "Number of cross class copies"); |
| 39 | |
Jim Laskey | 95eda5b | 2006-08-01 14:21:23 +0000 | [diff] [blame] | 40 | static RegisterScheduler |
| 41 | burrListDAGScheduler("list-burr", |
| 42 | " Bottom-up register reduction list scheduling", |
| 43 | createBURRListDAGScheduler); |
| 44 | static RegisterScheduler |
| 45 | tdrListrDAGScheduler("list-tdrr", |
| 46 | " Top-down register reduction list scheduling", |
| 47 | createTDRRListDAGScheduler); |
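//
// With the registrations above, either variant can typically be selected on
// the llc command line through the scheduler registry (assuming the standard
// -pre-RA-sched option is wired to RegisterScheduler), for example:
//
//   llc -pre-RA-sched=list-burr foo.bc   // bottom-up register reduction
//   llc -pre-RA-sched=list-tdrr foo.bc   // top-down register reduction
//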
| 48 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 49 | namespace { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 50 | //===----------------------------------------------------------------------===// |
| 51 | /// ScheduleDAGRRList - The actual register reduction list scheduler |
| 52 | /// implementation. This supports both top-down and bottom-up scheduling. |
| 53 | /// |
Chris Lattner | e097e6f | 2006-06-28 22:17:39 +0000 | [diff] [blame] | 54 | class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 55 | private: |
| 56 | /// isBottomUp - This is true if the scheduling problem is bottom-up, false if |
| 57 | /// it is top-down. |
| 58 | bool isBottomUp; |
| 59 | |
| 60 | /// AvailableQueue - The priority queue to use for the available SUnits. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 61 | SchedulingPriorityQueue *AvailableQueue; |
| 62 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 63 | /// LiveRegs / LiveRegDefs - A set of physical registers and their definitions
| 64 | /// that are "live". These nodes must be scheduled before any other nodes that
| 65 | /// modify the registers can be scheduled.
| 66 | SmallSet<unsigned, 4> LiveRegs; |
| 67 | std::vector<SUnit*> LiveRegDefs; |
| 68 | std::vector<unsigned> LiveRegCycles; |
| 69 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 70 | public: |
| 71 | ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb, |
| 72 | const TargetMachine &tm, bool isbottomup, |
| 73 | SchedulingPriorityQueue *availqueue) |
| 74 | : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), |
| 75 | AvailableQueue(availqueue) { |
| 76 | } |
| 77 | |
| 78 | ~ScheduleDAGRRList() { |
| 79 | delete AvailableQueue; |
| 80 | } |
| 81 | |
| 82 | void Schedule(); |
| 83 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 84 | /// IsReachable - Checks if SU is reachable from TargetSU |
| 85 | bool IsReachable(SUnit *SU, SUnit *TargetSU); |
| 86 | |
| 87 | /// willCreateCycle - Returns true if adding an edge from SU to TargetSU will |
| 88 | /// create a cycle. |
| 89 | bool WillCreateCycle(SUnit *SU, SUnit *TargetSU); |
| 90 | |
| 91 | /// AddPred - This adds the specified node X as a predecessor of
| 92 | /// the current node Y if it is not already a predecessor.
| 93 | /// This returns true if this is a new pred.
| 94 | /// Updates the topological ordering if required.
| 95 | bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial, |
| 96 | unsigned PhyReg = 0, int Cost = 1); |
| 97 | |
| 98 | /// RemovePred - This removes the specified node N from the predecessors of
| 99 | /// the current node M. Updates the topological ordering if required.
| 100 | bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial); |
| 101 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 102 | private: |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 103 | void ReleasePred(SUnit*, bool, unsigned); |
| 104 | void ReleaseSucc(SUnit*, bool isChain, unsigned); |
| 105 | void CapturePred(SUnit*, SUnit*, bool); |
| 106 | void ScheduleNodeBottomUp(SUnit*, unsigned); |
| 107 | void ScheduleNodeTopDown(SUnit*, unsigned); |
| 108 | void UnscheduleNodeBottomUp(SUnit*); |
| 109 | void BacktrackBottomUp(SUnit*, unsigned, unsigned&); |
| 110 | SUnit *CopyAndMoveSuccessors(SUnit*); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 111 | void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned, |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 112 | const TargetRegisterClass*, |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 113 | const TargetRegisterClass*, |
| 114 | SmallVector<SUnit*, 2>&); |
| 115 | bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 116 | void ListScheduleTopDown(); |
| 117 | void ListScheduleBottomUp(); |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 118 | void CommuteNodesToReducePressure(); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 119 | |
| 120 | |
| 121 | /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it. |
| 122 | /// Updates the topological ordering if required.
| 123 | SUnit *CreateNewSUnit(SDNode *N) { |
| 124 | SUnit *NewNode = NewSUnit(N); |
| 125 | // Update the topological ordering
| 126 | if (NewNode->NodeNum >= Node2Index.size()) |
| 127 | InitDAGTopologicalSorting(); |
| 128 | return NewNode; |
| 129 | } |
| 130 | |
| 131 | /// CreateClone - Creates a new SUnit from an existing one.
| 132 | /// Updates the topological ordering if required.
| 133 | SUnit *CreateClone(SUnit *N) { |
| 134 | SUnit *NewNode = Clone(N); |
| 135 | // Update the topological ordering
| 136 | if (NewNode->NodeNum >= Node2Index.size()) |
| 137 | InitDAGTopologicalSorting(); |
| 138 | return NewNode; |
| 139 | } |
| 140 | |
| 141 | /// Functions for preserving the topological ordering |
| 142 | /// even after dynamic insertions of new edges. |
| 143 | /// This allows for a very fast implementation of IsReachable.
| 144 | |
| 145 | |
| 146 | /** |
| 147 | The idea of the algorithm is taken from |
| 148 | "Online algorithms for managing the topological order of |
| 149 | a directed acyclic graph" by David J. Pearce and Paul H. J. Kelly.
| 150 | This is the MNR algorithm, which was first introduced by
| 151 | A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
| 152 | "Maintaining a topological order under edge insertions". |
| 153 | |
| 154 | Short description of the algorithm: |
| 155 | |
| 156 | A topological ordering, ord, of a DAG maps each node to a topological
| 157 | index so that for all edges X->Y it is the case that ord(X) < ord(Y).
| 158 | |
| 159 | This means that if there is a path from the node X to the node Z, |
| 160 | then ord(X) < ord(Z). |
| 161 | |
| 162 | This property can be used to check for reachability of nodes: |
| 163 | if Z is reachable from X, then an insertion of the edge Z->X would |
| 164 | create a cycle. |
| 165 | |
| 166 | The algorithm first computes a topological ordering for the DAG by initializing
| 167 | the Index2Node and Node2Index arrays and then tries to keep the ordering |
| 168 | up-to-date after edge insertions by reordering the DAG. |
| 169 | |
| 170 | On insertion of the edge X->Y, the algorithm first marks the nodes reachable
| 171 | from Y by calling DFS, and then shifts them using Shift so that they lie
| 172 | immediately after X in Index2Node.
| 173 | */ |
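/**
  A small worked example (illustrative only, using hypothetical nodes A-D):
  suppose the current ordering is ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3 and
  the edge C->B is inserted.  The new edge requires ord(C) < ord(B), but
  currently ord(B)=1 < ord(C)=2, so the index range [1, 2] is affected.  DFS
  starting at B marks the nodes reachable from B inside that range (here just
  B itself; if C were reachable from B, the new edge would form a cycle), and
  Shift then renumbers the range so that the unmarked node C gets index 1 and
  the marked node B gets index 2.  The ordering is consistent again and no
  node outside the range was touched.
*/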
| 174 | |
| 175 | /// InitDAGTopologicalSorting - create the initial topological |
| 176 | /// ordering from the DAG to be scheduled |
| 177 | void InitDAGTopologicalSorting(); |
| 178 | |
| 179 | /// DFS - make a DFS traversal and mark all nodes affected by the |
| 180 | /// edge insertion. These nodes should later get new topological indexes |
| 181 | /// by means of the Shift method.
| 182 | void DFS(SUnit *SU, int UpperBound, bool& HasLoop); |
| 183 | |
| 184 | /// Shift - reassign topological indexes for the nodes in the DAG |
| 185 | /// to preserve the topological ordering |
| 186 | void Shift(BitVector& Visited, int LowerBound, int UpperBound); |
| 187 | |
| 188 | /// Allocate - assign the topological index to a node n |
| 189 | void Allocate(int n, int index); |
| 190 | |
| 191 | /// Index2Node - Maps topological index to the node number |
| 192 | std::vector<int> Index2Node; |
| 193 | /// Node2Index - Maps the node number to its topological index |
| 194 | std::vector<int> Node2Index; |
| 195 | /// Visited - a set of nodes visited during a DFS traversal |
| 196 | BitVector Visited; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 197 | }; |
| 198 | } // end anonymous namespace |
| 199 | |
| 200 | |
| 201 | /// Schedule - Schedule the DAG using list scheduling. |
| 202 | void ScheduleDAGRRList::Schedule() { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 203 | DOUT << "********** List Scheduling **********\n"; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 204 | |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 205 | LiveRegDefs.resize(TRI->getNumRegs(), NULL); |
| 206 | LiveRegCycles.resize(TRI->getNumRegs(), 0); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 207 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 208 | // Build scheduling units. |
| 209 | BuildSchedUnits(); |
| 210 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 211 | DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 212 | SUnits[su].dumpAll(&DAG)); |
Evan Cheng | 47fbeda | 2006-10-14 08:34:06 +0000 | [diff] [blame] | 213 | CalculateDepths(); |
| 214 | CalculateHeights(); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 215 | InitDAGTopologicalSorting(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 216 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 217 | AvailableQueue->initNodes(SUnitMap, SUnits); |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 218 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 219 | // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate. |
| 220 | if (isBottomUp) |
| 221 | ListScheduleBottomUp(); |
| 222 | else |
| 223 | ListScheduleTopDown(); |
| 224 | |
| 225 | AvailableQueue->releaseState(); |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 226 | |
Evan Cheng | 009f5f5 | 2006-05-25 08:37:31 +0000 | [diff] [blame] | 227 | CommuteNodesToReducePressure(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 228 | |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 229 | DOUT << "*** Final schedule ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 230 | DEBUG(dumpSchedule()); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 231 | DOUT << "\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 232 | |
| 233 | // Emit in scheduled order |
| 234 | EmitSchedule(); |
| 235 | } |
| 236 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 237 | /// CommuteNodesToReducePressure - If a node is two-address and commutable, and |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 238 | /// it is not the last use of its first operand, add it to the CommuteSet if |
| 239 | /// possible. It will be commuted when it is translated to an MI.
| 240 | void ScheduleDAGRRList::CommuteNodesToReducePressure() { |
Evan Cheng | e3c4419 | 2007-06-22 01:35:51 +0000 | [diff] [blame] | 241 | SmallPtrSet<SUnit*, 4> OperandSeen; |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 242 | for (unsigned i = Sequence.size()-1; i != 0; --i) { // Ignore first node. |
| 243 | SUnit *SU = Sequence[i]; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 244 | if (!SU || !SU->Node) continue; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 245 | if (SU->isCommutable) { |
| 246 | unsigned Opc = SU->Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 247 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 248 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 249 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 250 | for (unsigned j = 0; j != NumOps; ++j) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 251 | if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 252 | continue; |
| 253 | |
| 254 | SDNode *OpN = SU->Node->getOperand(j).Val; |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 255 | SUnit *OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo]; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 256 | if (OpSU && OperandSeen.count(OpSU) == 1) { |
| 257 | // Ok, so SU is not the last use of OpSU, but SU is two-address so |
| 258 | // it will clobber OpSU. Try to commute SU if no other source operands |
| 259 | // are live below. |
| 260 | bool DoCommute = true; |
| 261 | for (unsigned k = 0; k < NumOps; ++k) { |
| 262 | if (k != j) { |
| 263 | OpN = SU->Node->getOperand(k).Val; |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 264 | OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo]; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 265 | if (OpSU && OperandSeen.count(OpSU) == 1) { |
| 266 | DoCommute = false; |
| 267 | break; |
| 268 | } |
| 269 | } |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 270 | } |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 271 | if (DoCommute) |
| 272 | CommuteSet.insert(SU->Node); |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 273 | } |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 274 | |
| 275 | // Only look at the first use&def node for now. |
| 276 | break; |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 277 | } |
| 278 | } |
| 279 | |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 280 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 281 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 282 | if (!I->isCtrl) |
| 283 | OperandSeen.insert(I->Dep); |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 284 | } |
| 285 | } |
| 286 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 287 | |
| 288 | //===----------------------------------------------------------------------===// |
| 289 | // Bottom-Up Scheduling |
| 290 | //===----------------------------------------------------------------------===// |
| 291 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 292 | /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 293 | /// the AvailableQueue if the count reaches zero. Also update its cycle bound. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 294 | void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain, |
| 295 | unsigned CurCycle) { |
| 296 | // FIXME: the distance between two nodes is not always == the predecessor's |
| 297 | // latency. For example, the reader can very well read the register written |
| 298 | // by the predecessor later than the issue cycle. It also depends on the |
| 299 | // interrupt model (drain vs. freeze). |
| 300 | PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency); |
| 301 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 302 | --PredSU->NumSuccsLeft; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 303 | |
| 304 | #ifndef NDEBUG |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 305 | if (PredSU->NumSuccsLeft < 0) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 306 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 307 | PredSU->dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 308 | cerr << " has been released too many times!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 309 | assert(0); |
| 310 | } |
| 311 | #endif |
| 312 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 313 | if (PredSU->NumSuccsLeft == 0) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 314 | // EntryToken has to go last! Special case it here. |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 315 | if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 316 | PredSU->isAvailable = true; |
| 317 | AvailableQueue->push(PredSU); |
| 318 | } |
| 319 | } |
| 320 | } |
| 321 | |
| 322 | /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending |
| 323 | /// count of its predecessors. If a predecessor pending count is zero, add it to |
| 324 | /// the Available queue. |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 325 | void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 326 | DOUT << "*** Scheduling [" << CurCycle << "]: "; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 327 | DEBUG(SU->dump(&DAG)); |
| 328 | SU->Cycle = CurCycle; |
| 329 | |
| 330 | AvailableQueue->ScheduledNode(SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 331 | |
| 332 | // Bottom up: release predecessors |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 333 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 334 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 335 | ReleasePred(I->Dep, I->isCtrl, CurCycle); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 336 | if (I->Cost < 0) { |
| 337 | // This is a physical register dependency and it's impossible or |
| 338 | // expensive to copy the register. Make sure nothing that can |
| 339 | // clobber the register is scheduled between the predecessor and |
| 340 | // this node. |
| 341 | if (LiveRegs.insert(I->Reg)) { |
| 342 | LiveRegDefs[I->Reg] = I->Dep; |
| 343 | LiveRegCycles[I->Reg] = CurCycle; |
| 344 | } |
| 345 | } |
| 346 | } |
| 347 | |
| 348 | // Release all the implicit physical register defs that are live. |
| 349 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 350 | I != E; ++I) { |
| 351 | if (I->Cost < 0) { |
| 352 | if (LiveRegCycles[I->Reg] == I->Dep->Cycle) { |
| 353 | LiveRegs.erase(I->Reg); |
| 354 | assert(LiveRegDefs[I->Reg] == SU && |
| 355 | "Physical register dependency violated?"); |
| 356 | LiveRegDefs[I->Reg] = NULL; |
| 357 | LiveRegCycles[I->Reg] = 0; |
| 358 | } |
| 359 | } |
| 360 | } |
| 361 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 362 | SU->isScheduled = true; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 363 | } |
| 364 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 365 | /// CapturePred - This does the opposite of ReleasePred. Since SU is being |
| 366 | /// unscheduled, increase the succ left count of its predecessors. Remove
| 367 | /// them from AvailableQueue if necessary. |
| 368 | void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) { |
| 369 | PredSU->CycleBound = 0; |
| 370 | for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end(); |
| 371 | I != E; ++I) { |
| 372 | if (I->Dep == SU) |
| 373 | continue; |
| 374 | PredSU->CycleBound = std::max(PredSU->CycleBound, |
| 375 | I->Dep->Cycle + PredSU->Latency); |
| 376 | } |
| 377 | |
| 378 | if (PredSU->isAvailable) { |
| 379 | PredSU->isAvailable = false; |
| 380 | if (!PredSU->isPending) |
| 381 | AvailableQueue->remove(PredSU); |
| 382 | } |
| 383 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 384 | ++PredSU->NumSuccsLeft; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 385 | } |
| 386 | |
| 387 | /// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
| 388 | /// and its predecessors' states to reflect the change.
| 389 | void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) { |
| 390 | DOUT << "*** Unscheduling [" << SU->Cycle << "]: "; |
| 391 | DEBUG(SU->dump(&DAG)); |
| 392 | |
| 393 | AvailableQueue->UnscheduledNode(SU); |
| 394 | |
| 395 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 396 | I != E; ++I) { |
| 397 | CapturePred(I->Dep, SU, I->isCtrl); |
| 398 | if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) { |
| 399 | LiveRegs.erase(I->Reg); |
| 400 | assert(LiveRegDefs[I->Reg] == I->Dep && |
| 401 | "Physical register dependency violated?"); |
| 402 | LiveRegDefs[I->Reg] = NULL; |
| 403 | LiveRegCycles[I->Reg] = 0; |
| 404 | } |
| 405 | } |
| 406 | |
| 407 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 408 | I != E; ++I) { |
| 409 | if (I->Cost < 0) { |
| 410 | if (LiveRegs.insert(I->Reg)) { |
| 411 | assert(!LiveRegDefs[I->Reg] && |
| 412 | "Physical register dependency violated?"); |
| 413 | LiveRegDefs[I->Reg] = SU; |
| 414 | } |
| 415 | if (I->Dep->Cycle < LiveRegCycles[I->Reg]) |
| 416 | LiveRegCycles[I->Reg] = I->Dep->Cycle; |
| 417 | } |
| 418 | } |
| 419 | |
| 420 | SU->Cycle = 0; |
| 421 | SU->isScheduled = false; |
| 422 | SU->isAvailable = true; |
| 423 | AvailableQueue->push(SU); |
| 424 | } |
| 425 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 426 | /// IsReachable - Checks if SU is reachable from TargetSU. |
| 427 | bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) { |
| 428 | // If insertion of the edge SU->TargetSU would create a cycle,
| 429 | // then there is a path from TargetSU to SU.
| 430 | int UpperBound, LowerBound; |
| 431 | LowerBound = Node2Index[TargetSU->NodeNum]; |
| 432 | UpperBound = Node2Index[SU->NodeNum]; |
| 433 | bool HasLoop = false; |
| 434 | // Is Ord(TargetSU) < Ord(SU) ? |
| 435 | if (LowerBound < UpperBound) { |
| 436 | Visited.reset(); |
| 437 | // There may be a path from TargetSU to SU. Check for it. |
| 438 | DFS(TargetSU, UpperBound, HasLoop); |
Evan Cheng | cfd5f82 | 2007-09-27 00:25:29 +0000 | [diff] [blame] | 439 | } |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 440 | return HasLoop; |
Evan Cheng | cfd5f82 | 2007-09-27 00:25:29 +0000 | [diff] [blame] | 441 | } |
| 442 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 443 | /// Allocate - assign the topological index to a node n |
| 444 | inline void ScheduleDAGRRList::Allocate(int n, int index) { |
| 445 | Node2Index[n] = index; |
| 446 | Index2Node[index] = n; |
Evan Cheng | cfd5f82 | 2007-09-27 00:25:29 +0000 | [diff] [blame] | 447 | } |
| 448 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 449 | /// InitDAGTopologicalSorting - create the initial topological |
| 450 | /// ordering from the DAG to be scheduled. |
| 451 | void ScheduleDAGRRList::InitDAGTopologicalSorting() { |
| 452 | unsigned DAGSize = SUnits.size(); |
| 453 | std::vector<unsigned> InDegree(DAGSize); |
| 454 | std::vector<SUnit*> WorkList; |
| 455 | WorkList.reserve(DAGSize); |
| 456 | std::vector<SUnit*> TopOrder; |
| 457 | TopOrder.reserve(DAGSize); |
| 458 | |
| 459 | // Initialize the data structures |
| 460 | for (unsigned i = 0, e = DAGSize; i != e; ++i) { |
| 461 | SUnit *SU = &SUnits[i]; |
| 462 | int NodeNum = SU->NodeNum; |
| 463 | unsigned Degree = SU->Succs.size(); |
| 464 | InDegree[NodeNum] = Degree; |
| 465 | |
| 466 | // Is it a node without dependencies? |
| 467 | if (Degree == 0) { |
| 468 | assert(SU->Succs.empty() && "SUnit should have no successors"); |
| 469 | // Collect leaf nodes |
| 470 | WorkList.push_back(SU); |
| 471 | } |
| 472 | } |
| 473 | |
| 474 | while (!WorkList.empty()) { |
| 475 | SUnit *SU = WorkList.back(); |
| 476 | WorkList.pop_back(); |
| 477 | TopOrder.push_back(SU); |
| 478 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 479 | I != E; ++I) { |
| 480 | SUnit *SU = I->Dep; |
| 481 | if (!--InDegree[SU->NodeNum]) |
| 482 | // If all dependencies of the node are processed already, |
| 483 | // then the node can be computed now |
| 484 | WorkList.push_back(SU); |
| 485 | } |
| 486 | } |
| 487 | |
| 488 | // Second pass, assign the actual topological order as node ids. |
| 489 | int Id = 0; |
| 490 | |
| 491 | Index2Node.clear(); |
| 492 | Node2Index.clear(); |
| 493 | Index2Node.resize(DAGSize); |
| 494 | Node2Index.resize(DAGSize); |
| 495 | Visited.resize(DAGSize); |
| 496 | |
| 497 | for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(), |
| 498 | TE = TopOrder.rend();TI != TE; ++TI) { |
| 499 | Allocate((*TI)->NodeNum, Id); |
| 500 | Id++; |
| 501 | } |
| 502 | |
| 503 | #ifndef NDEBUG |
| 504 | // Check correctness of the ordering |
| 505 | for (unsigned i = 0, e = DAGSize; i != e; ++i) { |
| 506 | SUnit *SU = &SUnits[i]; |
| 507 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 508 | I != E; ++I) { |
| 509 | assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] && |
| 510 | "Wrong topological sorting"); |
| 511 | } |
| 512 | } |
| 513 | #endif |
| 514 | } |
| 515 | |
| 516 | /// AddPred - adds an edge from SUnit X to SUnit Y.
| 517 | /// Updates the topological ordering if required.
| 518 | bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial, |
| 519 | unsigned PhyReg, int Cost) { |
| 520 | int UpperBound, LowerBound; |
| 521 | LowerBound = Node2Index[Y->NodeNum]; |
| 522 | UpperBound = Node2Index[X->NodeNum]; |
| 523 | bool HasLoop = false; |
| 524 | // Is Ord(X) < Ord(Y) ? |
| 525 | if (LowerBound < UpperBound) { |
| 526 | // Update the topological order |
| 527 | Visited.reset(); |
| 528 | DFS(Y, UpperBound, HasLoop); |
| 529 | assert(!HasLoop && "Inserted edge creates a loop!"); |
| 530 | // Recompute topological indexes |
| 531 | Shift(Visited, LowerBound, UpperBound); |
| 532 | } |
| 533 | // Now really insert the edge |
| 534 | return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
| 535 | } |
| 536 | |
| 537 | /// RemovePred - This removes the specified node N from the preds of
| 538 | /// the current node M. Updates the topological ordering if required.
| 539 | bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N, |
| 540 | bool isCtrl, bool isSpecial) { |
| 541 | // InitDAGTopologicalSorting(); |
| 542 | return M->removePred(N, isCtrl, isSpecial); |
| 543 | } |
| 544 | |
| 545 | /// DFS - make a DFS traversal to mark all nodes reachable from SU, and mark all nodes affected by the edge insertion. These nodes should later get new topological indexes by means of the Shift method.
| 546 | void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) { |
| 547 | std::vector<SUnit*> WorkList; |
| 548 | WorkList.reserve(SUnits.size()); |
| 549 | |
| 550 | WorkList.push_back(SU); |
| 551 | while (!WorkList.empty()) { |
| 552 | SU = WorkList.back(); |
| 553 | WorkList.pop_back(); |
| 554 | Visited.set(SU->NodeNum); |
| 555 | for (int I = SU->Succs.size()-1; I >= 0; --I) { |
| 556 | int s = SU->Succs[I].Dep->NodeNum; |
| 557 | if (Node2Index[s] == UpperBound) { |
| 558 | HasLoop = true; |
| 559 | return; |
| 560 | } |
| 561 | // Visit successors if not already visited and in the affected region.
| 562 | if (!Visited.test(s) && Node2Index[s] < UpperBound) { |
| 563 | WorkList.push_back(SU->Succs[I].Dep); |
| 564 | } |
| 565 | } |
| 566 | } |
| 567 | } |
| 568 | |
| 569 | /// Shift - renumber the nodes so that the topological ordering is |
| 570 | /// preserved |
| 571 | void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound, |
| 572 | int UpperBound) { |
| 573 | std::vector<int> L; |
| 574 | int shift = 0; |
| 575 | int i; |
| 576 | |
| 577 | for (i = LowerBound; i <= UpperBound; ++i) { |
| 578 | // w is the node at topological index i.
| 579 | int w = Index2Node[i]; |
| 580 | if (Visited.test(w)) { |
| 581 | // Unmark |
| 582 | Visited.reset(w); |
| 583 | L.push_back(w); |
| 584 | shift = shift + 1; |
| 585 | } else { |
| 586 | Allocate(w, i - shift); |
| 587 | } |
| 588 | } |
| 589 | |
| 590 | for (unsigned j = 0; j < L.size(); ++j) { |
| 591 | Allocate(L[j], i - shift); |
| 592 | i = i + 1; |
| 593 | } |
| 594 | } |
| 595 | |
| 596 | |
Dan Gohman | fd227e9 | 2008-03-25 17:10:29 +0000 | [diff] [blame] | 597 | /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will |
Evan Cheng | cfd5f82 | 2007-09-27 00:25:29 +0000 | [diff] [blame] | 598 | /// create a cycle. |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 599 | bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) { |
| 600 | if (IsReachable(TargetSU, SU)) |
Evan Cheng | cfd5f82 | 2007-09-27 00:25:29 +0000 | [diff] [blame] | 601 | return true; |
| 602 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 603 | I != E; ++I) |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 604 | if (I->Cost < 0 && IsReachable(TargetSU, I->Dep)) |
Evan Cheng | cfd5f82 | 2007-09-27 00:25:29 +0000 | [diff] [blame] | 605 | return true; |
| 606 | return false; |
| 607 | } |
| 608 | |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 609 | /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 610 | /// BtCycle in order to schedule a specific node. CurCycle is decremented back
| 611 | /// to BtCycle as the nodes scheduled in between are unscheduled.
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 612 | void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle, |
| 613 | unsigned &CurCycle) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 614 | SUnit *OldSU = NULL; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 615 | while (CurCycle > BtCycle) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 616 | OldSU = Sequence.back(); |
| 617 | Sequence.pop_back(); |
| 618 | if (SU->isSucc(OldSU)) |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 619 | // Don't try to remove SU from AvailableQueue. |
| 620 | SU->isAvailable = false; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 621 | UnscheduleNodeBottomUp(OldSU); |
| 622 | --CurCycle; |
| 623 | } |
| 624 | |
| 625 | |
| 626 | if (SU->isSucc(OldSU)) { |
| 627 | assert(false && "Something is wrong!"); |
| 628 | abort(); |
| 629 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 630 | |
| 631 | ++NumBacktracks; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 632 | } |
| 633 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 634 | /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled |
| 635 | /// successors to the newly created node. |
| 636 | SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 637 | if (SU->FlaggedNodes.size()) |
| 638 | return NULL; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 639 | |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 640 | SDNode *N = SU->Node; |
| 641 | if (!N) |
| 642 | return NULL; |
| 643 | |
| 644 | SUnit *NewSU; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 645 | bool TryUnfold = false; |
Evan Cheng | 84d0ebc | 2007-10-05 01:42:35 +0000 | [diff] [blame] | 646 | for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { |
| 647 | MVT::ValueType VT = N->getValueType(i); |
| 648 | if (VT == MVT::Flag) |
| 649 | return NULL; |
| 650 | else if (VT == MVT::Other) |
| 651 | TryUnfold = true; |
| 652 | } |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 653 | for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { |
| 654 | const SDOperand &Op = N->getOperand(i); |
| 655 | MVT::ValueType VT = Op.Val->getValueType(Op.ResNo); |
| 656 | if (VT == MVT::Flag) |
| 657 | return NULL; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 658 | } |
| 659 | |
| 660 | if (TryUnfold) { |
| 661 | SmallVector<SDNode*, 4> NewNodes; |
Owen Anderson | 0ec92e9 | 2008-01-07 01:35:56 +0000 | [diff] [blame] | 662 | if (!TII->unfoldMemoryOperand(DAG, N, NewNodes)) |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 663 | return NULL; |
| 664 | |
| 665 | DOUT << "Unfolding SU # " << SU->NodeNum << "\n"; |
| 666 | assert(NewNodes.size() == 2 && "Expected a load folding node!"); |
| 667 | |
| 668 | N = NewNodes[1]; |
| 669 | SDNode *LoadNode = NewNodes[0]; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 670 | unsigned NumVals = N->getNumValues(); |
| 671 | unsigned OldNumVals = SU->Node->getNumValues(); |
| 672 | for (unsigned i = 0; i != NumVals; ++i) |
Chris Lattner | 3cfb56d | 2007-10-15 06:10:22 +0000 | [diff] [blame] | 673 | DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i)); |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 674 | DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1), |
Chris Lattner | 3cfb56d | 2007-10-15 06:10:22 +0000 | [diff] [blame] | 675 | SDOperand(LoadNode, 1)); |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 676 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 677 | SUnit *NewSU = CreateNewSUnit(N); |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 678 | SUnitMap[N].push_back(NewSU); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 679 | const TargetInstrDesc &TID = TII->get(N->getTargetOpcode()); |
Dan Gohman | 856c012 | 2008-02-16 00:25:40 +0000 | [diff] [blame] | 680 | for (unsigned i = 0; i != TID.getNumOperands(); ++i) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 681 | if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) { |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 682 | NewSU->isTwoAddress = true; |
| 683 | break; |
| 684 | } |
| 685 | } |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 686 | if (TID.isCommutable()) |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 687 | NewSU->isCommutable = true; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 688 | // FIXME: Calculate height / depth and propagate the changes? |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 689 | NewSU->Depth = SU->Depth; |
| 690 | NewSU->Height = SU->Height; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 691 | ComputeLatency(NewSU); |
| 692 | |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 693 | // LoadNode may already exist. This can happen when there is another
| 694 | // load from the same location producing the same type of value
| 695 | // but with different alignment or volatility.
| 696 | bool isNewLoad = true; |
| 697 | SUnit *LoadSU; |
| 698 | DenseMap<SDNode*, std::vector<SUnit*> >::iterator SMI = |
| 699 | SUnitMap.find(LoadNode); |
| 700 | if (SMI != SUnitMap.end()) { |
| 701 | LoadSU = SMI->second.front(); |
| 702 | isNewLoad = false; |
| 703 | } else { |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 704 | LoadSU = CreateNewSUnit(LoadNode); |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 705 | SUnitMap[LoadNode].push_back(LoadSU); |
| 706 | |
| 707 | LoadSU->Depth = SU->Depth; |
| 708 | LoadSU->Height = SU->Height; |
| 709 | ComputeLatency(LoadSU); |
| 710 | } |
| 711 | |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 712 | SUnit *ChainPred = NULL; |
| 713 | SmallVector<SDep, 4> ChainSuccs; |
| 714 | SmallVector<SDep, 4> LoadPreds; |
| 715 | SmallVector<SDep, 4> NodePreds; |
| 716 | SmallVector<SDep, 4> NodeSuccs; |
| 717 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 718 | I != E; ++I) { |
| 719 | if (I->isCtrl) |
| 720 | ChainPred = I->Dep; |
Evan Cheng | 567d2e5 | 2008-03-04 00:41:45 +0000 | [diff] [blame] | 721 | else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode)) |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 722 | LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false)); |
| 723 | else |
| 724 | NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false)); |
| 725 | } |
| 726 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 727 | I != E; ++I) { |
| 728 | if (I->isCtrl) |
| 729 | ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost, |
| 730 | I->isCtrl, I->isSpecial)); |
| 731 | else |
| 732 | NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost, |
| 733 | I->isCtrl, I->isSpecial)); |
| 734 | } |
| 735 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 736 | RemovePred(SU, ChainPred, true, false); |
| 737 | if (isNewLoad) { |
| 738 | AddPred(LoadSU, ChainPred, true, false);
| 739 | } |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 740 | for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) { |
| 741 | SDep *Pred = &LoadPreds[i]; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 742 | RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial); |
| 743 | if (isNewLoad) { |
| 744 | AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial, |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 745 | Pred->Reg, Pred->Cost); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 746 | } |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 747 | } |
| 748 | for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) { |
| 749 | SDep *Pred = &NodePreds[i]; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 750 | RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial); |
| 751 | AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial, |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 752 | Pred->Reg, Pred->Cost); |
| 753 | } |
| 754 | for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) { |
| 755 | SDep *Succ = &NodeSuccs[i]; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 756 | RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial); |
| 757 | AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial, |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 758 | Succ->Reg, Succ->Cost); |
| 759 | } |
| 760 | for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) { |
| 761 | SDep *Succ = &ChainSuccs[i]; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 762 | RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial); |
| 763 | if (isNewLoad) { |
| 764 | AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial, |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 765 | Succ->Reg, Succ->Cost); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 766 | } |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 767 | } |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 768 | if (isNewLoad) { |
| 769 | AddPred(NewSU, LoadSU, false, false); |
| 770 | } |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 771 | |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 772 | if (isNewLoad) |
| 773 | AvailableQueue->addNode(LoadSU); |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 774 | AvailableQueue->addNode(NewSU); |
| 775 | |
| 776 | ++NumUnfolds; |
| 777 | |
| 778 | if (NewSU->NumSuccsLeft == 0) { |
| 779 | NewSU->isAvailable = true; |
| 780 | return NewSU; |
Evan Cheng | 91e0fc9 | 2007-12-18 08:42:10 +0000 | [diff] [blame] | 781 | } |
| 782 | SU = NewSU; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 783 | } |
| 784 | |
| 785 | DOUT << "Duplicating SU # " << SU->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 786 | NewSU = CreateClone(SU); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 787 | |
| 788 | // New SUnit has the exact same predecessors. |
| 789 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 790 | I != E; ++I) |
| 791 | if (!I->isSpecial) { |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 792 | AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 793 | NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1); |
| 794 | } |
| 795 | |
| 796 | // Only copy scheduled successors. Cut them from old node's successor |
| 797 | // list and move them over. |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 798 | SmallVector<std::pair<SUnit*, bool>, 4> DelDeps; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 799 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 800 | I != E; ++I) { |
| 801 | if (I->isSpecial) |
| 802 | continue; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 803 | if (I->Dep->isScheduled) { |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 804 | NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 805 | AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost); |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 806 | DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl)); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 807 | } |
| 808 | } |
| 809 | for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) { |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 810 | SUnit *Succ = DelDeps[i].first; |
| 811 | bool isCtrl = DelDeps[i].second; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 812 | RemovePred(Succ, SU, isCtrl, false); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 813 | } |
| 814 | |
| 815 | AvailableQueue->updateNode(SU); |
| 816 | AvailableQueue->addNode(NewSU); |
| 817 | |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 818 | ++NumDups; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 819 | return NewSU; |
| 820 | } |
| 821 | |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 822 | /// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies |
| 823 | /// and move all scheduled successors of the given SUnit to the last copy. |
| 824 | void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, |
| 825 | const TargetRegisterClass *DestRC, |
| 826 | const TargetRegisterClass *SrcRC, |
| 827 | SmallVector<SUnit*, 2> &Copies) { |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 828 | SUnit *CopyFromSU = CreateNewSUnit(NULL); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 829 | CopyFromSU->CopySrcRC = SrcRC; |
| 830 | CopyFromSU->CopyDstRC = DestRC; |
| 831 | CopyFromSU->Depth = SU->Depth; |
| 832 | CopyFromSU->Height = SU->Height; |
| 833 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 834 | SUnit *CopyToSU = CreateNewSUnit(NULL); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 835 | CopyToSU->CopySrcRC = DestRC; |
| 836 | CopyToSU->CopyDstRC = SrcRC; |
| 837 | |
| 838 | // Only copy scheduled successors. Cut them from old node's successor |
| 839 | // list and move them over. |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 840 | SmallVector<std::pair<SUnit*, bool>, 4> DelDeps; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 841 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 842 | I != E; ++I) { |
| 843 | if (I->isSpecial) |
| 844 | continue; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 845 | if (I->Dep->isScheduled) { |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 846 | CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 847 | AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost); |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 848 | DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl)); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 849 | } |
| 850 | } |
| 851 | for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) { |
Evan Cheng | bde499b | 2007-09-27 07:29:27 +0000 | [diff] [blame] | 852 | SUnit *Succ = DelDeps[i].first; |
| 853 | bool isCtrl = DelDeps[i].second; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 854 | RemovePred(Succ, SU, isCtrl, false); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 855 | } |
| 856 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 857 | AddPred(CopyFromSU, SU, false, false, Reg, -1); |
| 858 | AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 859 | |
| 860 | AvailableQueue->updateNode(SU); |
| 861 | AvailableQueue->addNode(CopyFromSU); |
| 862 | AvailableQueue->addNode(CopyToSU); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 863 | Copies.push_back(CopyFromSU); |
| 864 | Copies.push_back(CopyToSU); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 865 | |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 866 | ++NumCCCopies; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 867 | } |
| 868 | |
| 869 | /// getPhysicalRegisterVT - Returns the ValueType of the physical register |
| 870 | /// definition of the specified node. |
| 871 | /// FIXME: Move to SelectionDAG? |
| 872 | static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg, |
| 873 | const TargetInstrInfo *TII) { |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 874 | const TargetInstrDesc &TID = TII->get(N->getTargetOpcode()); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 875 | assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!"); |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 876 | unsigned NumRes = TID.getNumDefs(); |
| 877 | for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) { |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 878 | if (Reg == *ImpDef) |
| 879 | break; |
| 880 | ++NumRes; |
| 881 | } |
| 882 | return N->getValueType(NumRes); |
| 883 | } |
| 884 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 885 | /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay |
| 886 | /// scheduling of the given node to satisfy live physical register dependencies. |
| 887 | /// If the specified node is the last one that's available to schedule, do
| 888 | /// whatever is necessary (i.e. backtracking or cloning) to make it possible. |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 889 | bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, |
| 890 | SmallVector<unsigned, 4> &LRegs){ |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 891 | if (LiveRegs.empty()) |
| 892 | return false; |
| 893 | |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 894 | SmallSet<unsigned, 4> RegAdded; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 895 | // If this node would clobber any "live" register, then it's not ready. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 896 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 897 | I != E; ++I) { |
| 898 | if (I->Cost < 0) { |
| 899 | unsigned Reg = I->Reg; |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 900 | if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) { |
| 901 | if (RegAdded.insert(Reg)) |
| 902 | LRegs.push_back(Reg); |
| 903 | } |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 904 | for (const unsigned *Alias = TRI->getAliasSet(Reg); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 905 | *Alias; ++Alias) |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 906 | if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) { |
| 907 | if (RegAdded.insert(*Alias)) |
| 908 | LRegs.push_back(*Alias); |
| 909 | } |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 910 | } |
| 911 | } |
| 912 | |
| 913 | for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) { |
| 914 | SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1]; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 915 | if (!Node || !Node->isTargetOpcode()) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 916 | continue; |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 917 | const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode()); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 918 | if (!TID.ImplicitDefs) |
| 919 | continue; |
| 920 | for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) { |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 921 | if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) { |
| 922 | if (RegAdded.insert(*Reg)) |
| 923 | LRegs.push_back(*Reg); |
| 924 | } |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 925 | for (const unsigned *Alias = TRI->getAliasSet(*Reg); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 926 | *Alias; ++Alias) |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 927 | if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) { |
| 928 | if (RegAdded.insert(*Alias)) |
| 929 | LRegs.push_back(*Alias); |
| 930 | } |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 931 | } |
| 932 | } |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 933 | return !LRegs.empty(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 934 | } |
| 935 | |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 936 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 937 | /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up |
| 938 | /// schedulers. |
| 939 | void ScheduleDAGRRList::ListScheduleBottomUp() { |
| 940 | unsigned CurCycle = 0; |
| 941 | // Add root to Available queue. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 942 | SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front(); |
| 943 | RootSU->isAvailable = true; |
| 944 | AvailableQueue->push(RootSU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 945 | |
| 946 | // While the Available queue is not empty, grab the node with the highest
| 947 | // priority. If it is not ready, put it back. Schedule the node.
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 948 | SmallVector<SUnit*, 4> NotReady; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 949 | while (!AvailableQueue->empty()) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 950 | bool Delayed = false; |
| 951 | DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 952 | SUnit *CurSU = AvailableQueue->pop(); |
| 953 | while (CurSU) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 954 | if (CurSU->CycleBound <= CurCycle) { |
| 955 | SmallVector<unsigned, 4> LRegs; |
| 956 | if (!DelayForLiveRegsBottomUp(CurSU, LRegs)) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 957 | break; |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 958 | Delayed = true; |
| 959 | LRegsMap.insert(std::make_pair(CurSU, LRegs)); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 960 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 961 | |
| 962 | CurSU->isPending = true; // This SU is not in AvailableQueue right now. |
| 963 | NotReady.push_back(CurSU); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 964 | CurSU = AvailableQueue->pop(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 965 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 966 | |
| 967 | // All candidates are delayed due to live physical reg dependencies. |
| 968 | // Try backtracking, code duplication, or inserting cross class copies |
| 969 | // to resolve it. |
| 970 | if (Delayed && !CurSU) { |
| 971 | for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { |
| 972 | SUnit *TrySU = NotReady[i]; |
| 973 | SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU]; |
| 974 | |
| 975 | // Try unscheduling up to the point where it's safe to schedule |
| 976 | // this node. |
| 977 | unsigned LiveCycle = CurCycle; |
| 978 | for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) { |
| 979 | unsigned Reg = LRegs[j]; |
| 980 | unsigned LCycle = LiveRegCycles[Reg]; |
| 981 | LiveCycle = std::min(LiveCycle, LCycle); |
| 982 | } |
| 983 | SUnit *OldSU = Sequence[LiveCycle]; |
| 984 | if (!WillCreateCycle(TrySU, OldSU)) { |
| 985 | BacktrackBottomUp(TrySU, LiveCycle, CurCycle); |
| 986 | // Force the current node to be scheduled before the node that |
| 987 | // requires the physical reg dep. |
| 988 | if (OldSU->isAvailable) { |
| 989 | OldSU->isAvailable = false; |
| 990 | AvailableQueue->remove(OldSU); |
| 991 | } |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 992 | AddPred(TrySU, OldSU, true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 993 | // If one or more successors have been unscheduled, then the current
| 994 | // node is no longer available. Schedule a successor that's now
| 995 | // available instead. |
| 996 | if (!TrySU->isAvailable) |
| 997 | CurSU = AvailableQueue->pop(); |
| 998 | else { |
| 999 | CurSU = TrySU; |
| 1000 | TrySU->isPending = false; |
| 1001 | NotReady.erase(NotReady.begin()+i); |
| 1002 | } |
| 1003 | break; |
| 1004 | } |
| 1005 | } |
| 1006 | |
| 1007 | if (!CurSU) { |
Dan Gohman | fd227e9 | 2008-03-25 17:10:29 +0000 | [diff] [blame] | 1008 | // Can't backtrack. Try duplicating the nodes that produce these
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1009 | // "expensive to copy" values to break the dependency. In case even |
| 1010 | // that doesn't work, insert cross class copies. |
| 1011 | SUnit *TrySU = NotReady[0]; |
| 1012 | SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU]; |
| 1013 | assert(LRegs.size() == 1 && "Can't handle this yet!"); |
| 1014 | unsigned Reg = LRegs[0]; |
| 1015 | SUnit *LRDef = LiveRegDefs[Reg]; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 1016 | SUnit *NewDef = CopyAndMoveSuccessors(LRDef); |
| 1017 | if (!NewDef) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1018 | // Issue expensive cross register class copies. |
| 1019 | MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII); |
| 1020 | const TargetRegisterClass *RC = |
Evan Cheng | e88a625 | 2008-03-11 07:19:34 +0000 | [diff] [blame] | 1021 | TRI->getPhysicalRegisterRegClass(Reg, VT); |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1022 | const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1023 | if (!DestRC) { |
| 1024 | assert(false && "Don't know how to copy this physical register!"); |
| 1025 | abort(); |
| 1026 | } |
| 1027 | SmallVector<SUnit*, 2> Copies; |
| 1028 | InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); |
| 1029 | DOUT << "Adding an edge from SU # " << TrySU->NodeNum |
| 1030 | << " to SU #" << Copies.front()->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1031 | AddPred(TrySU, Copies.front(), true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1032 | NewDef = Copies.back(); |
| 1033 | } |
| 1034 | |
| 1035 | DOUT << "Adding an edge from SU # " << NewDef->NodeNum |
| 1036 | << " to SU #" << TrySU->NodeNum << "\n"; |
| 1037 | LiveRegDefs[Reg] = NewDef; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1038 | AddPred(NewDef, TrySU, true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1039 | TrySU->isAvailable = false; |
| 1040 | CurSU = NewDef; |
| 1041 | } |
| 1042 | |
| 1043 | if (!CurSU) { |
| 1044 | assert(false && "Unable to resolve live physical register dependencies!"); |
| 1045 | abort(); |
| 1046 | } |
| 1047 | } |
| 1048 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1049 | // Add the nodes that aren't ready back onto the available list. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1050 | for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { |
| 1051 | NotReady[i]->isPending = false; |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1052 | // May no longer be available due to backtracking. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1053 | if (NotReady[i]->isAvailable) |
| 1054 | AvailableQueue->push(NotReady[i]); |
| 1055 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1056 | NotReady.clear(); |
| 1057 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1058 | if (!CurSU) |
| 1059 | Sequence.push_back(0); |
| 1060 | else { |
| 1061 | ScheduleNodeBottomUp(CurSU, CurCycle); |
| 1062 | Sequence.push_back(CurSU); |
| 1063 | } |
| 1064 | ++CurCycle; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1065 | } |
| 1066 | |
| 1067 | // Add entry node last |
| 1068 | if (DAG.getEntryNode().Val != DAG.getRoot().Val) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1069 | SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1070 | Sequence.push_back(Entry); |
| 1071 | } |
| 1072 | |
| 1073 | // Reverse the order since this is a bottom-up schedule.
| 1074 | std::reverse(Sequence.begin(), Sequence.end()); |
| 1075 | |
| 1076 | |
| 1077 | #ifndef NDEBUG |
| 1078 | // Verify that all SUnits were scheduled. |
| 1079 | bool AnyNotSched = false; |
| 1080 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1081 | if (SUnits[i].NumSuccsLeft != 0) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1082 | if (!AnyNotSched) |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1083 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1084 | SUnits[i].dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1085 | cerr << "has not been scheduled!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1086 | AnyNotSched = true; |
| 1087 | } |
| 1088 | } |
| 1089 | assert(!AnyNotSched); |
| 1090 | #endif |
| 1091 | } |
| 1092 | |
| 1093 | //===----------------------------------------------------------------------===// |
| 1094 | // Top-Down Scheduling |
| 1095 | //===----------------------------------------------------------------------===// |
| 1096 | |
| 1097 | /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1098 | /// the AvailableQueue if the count reaches zero. Also update its cycle bound. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1099 | void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain, |
| 1100 | unsigned CurCycle) { |
| 1101 | // FIXME: the distance between two nodes is not always == the predecessor's |
| 1102 | // latency. For example, the reader can very well read the register written |
| 1103 | // by the predecessor later than the issue cycle. It also depends on the |
| 1104 | // interrupt model (drain vs. freeze). |
| 1105 | SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency); |
| 1106 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1107 | --SuccSU->NumPredsLeft; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1108 | |
| 1109 | #ifndef NDEBUG |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1110 | if (SuccSU->NumPredsLeft < 0) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1111 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1112 | SuccSU->dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1113 | cerr << " has been released too many times!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1114 | assert(0); |
| 1115 | } |
| 1116 | #endif |
| 1117 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1118 | if (SuccSU->NumPredsLeft == 0) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1119 | SuccSU->isAvailable = true; |
| 1120 | AvailableQueue->push(SuccSU); |
| 1121 | } |
| 1122 | } |
| 1123 | |
| 1124 | |
| 1125 | /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending |
| 1126 | /// count of its successors. If a successor pending count is zero, add it to |
| 1127 | /// the Available queue. |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1128 | void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1129 | DOUT << "*** Scheduling [" << CurCycle << "]: "; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1130 | DEBUG(SU->dump(&DAG)); |
| 1131 | SU->Cycle = CurCycle; |
| 1132 | |
| 1133 | AvailableQueue->ScheduledNode(SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1134 | |
| 1135 | // Top down: release successors |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1136 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1137 | I != E; ++I) |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1138 | ReleaseSucc(I->Dep, I->isCtrl, CurCycle); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1139 | SU->isScheduled = true; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1140 | } |
| 1141 | |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1142 | /// ListScheduleTopDown - The main loop of list scheduling for top-down |
| 1143 | /// schedulers. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1144 | void ScheduleDAGRRList::ListScheduleTopDown() { |
| 1145 | unsigned CurCycle = 0; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1146 | SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1147 | |
| 1148 | // Add all leaves to the Available queue.
| 1149 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
| 1150 | // It is available if it has no predecessors. |
Dan Gohman | 70de4cb | 2008-01-29 13:02:09 +0000 | [diff] [blame] | 1151 | if (SUnits[i].Preds.empty() && &SUnits[i] != Entry) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1152 | AvailableQueue->push(&SUnits[i]); |
| 1153 | SUnits[i].isAvailable = true; |
| 1154 | } |
| 1155 | } |
| 1156 | |
| 1157 | // Emit the entry node first. |
| 1158 | ScheduleNodeTopDown(Entry, CurCycle); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1159 | Sequence.push_back(Entry); |
| 1160 | ++CurCycle; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1161 | |
| 1162 | // While Available queue is not empty, grab the node with the highest |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1163 | // priority. If it is not ready, put it back. Schedule the node.
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1164 | std::vector<SUnit*> NotReady; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1165 | while (!AvailableQueue->empty()) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1166 | SUnit *CurSU = AvailableQueue->pop(); |
| 1167 | while (CurSU && CurSU->CycleBound > CurCycle) { |
| 1168 | NotReady.push_back(CurSU); |
| 1169 | CurSU = AvailableQueue->pop(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1170 | } |
| 1171 | |
| 1172 | // Add the nodes that aren't ready back onto the available list. |
| 1173 | AvailableQueue->push_all(NotReady); |
| 1174 | NotReady.clear(); |
| 1175 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1176 | if (!CurSU) |
| 1177 | Sequence.push_back(0); |
| 1178 | else { |
| 1179 | ScheduleNodeTopDown(CurSU, CurCycle); |
| 1180 | Sequence.push_back(CurSU); |
| 1181 | } |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1182 | CurCycle++; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1183 | } |
| 1184 | |
| 1185 | |
| 1186 | #ifndef NDEBUG |
| 1187 | // Verify that all SUnits were scheduled. |
| 1188 | bool AnyNotSched = false; |
| 1189 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
| 1190 | if (!SUnits[i].isScheduled) { |
| 1191 | if (!AnyNotSched) |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1192 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1193 | SUnits[i].dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1194 | cerr << "has not been scheduled!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1195 | AnyNotSched = true; |
| 1196 | } |
| 1197 | } |
| 1198 | assert(!AnyNotSched); |
| 1199 | #endif |
| 1200 | } |
| 1201 | |
| 1202 | |
| 1203 | |
| 1204 | //===----------------------------------------------------------------------===// |
| 1205 | // RegReductionPriorityQueue Implementation |
| 1206 | //===----------------------------------------------------------------------===// |
| 1207 | // |
| 1208 | // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers |
| 1209 | // to reduce register pressure. |
| 1210 | // |
| 1211 | namespace { |
| 1212 | template<class SF> |
| 1213 | class RegReductionPriorityQueue; |
| 1214 | |
| 1215 | /// Sorting functions for the Available queue. |
| 1216 | struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> { |
| 1217 | RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ; |
| 1218 | bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {} |
| 1219 | bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {} |
| 1220 | |
| 1221 | bool operator()(const SUnit* left, const SUnit* right) const; |
| 1222 | }; |
| 1223 | |
| 1224 | struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> { |
| 1225 | RegReductionPriorityQueue<td_ls_rr_sort> *SPQ; |
| 1226 | td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {} |
| 1227 | td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {} |
| 1228 | |
| 1229 | bool operator()(const SUnit* left, const SUnit* right) const; |
| 1230 | }; |
| 1231 | } // end anonymous namespace |
| 1232 | |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1233 | static inline bool isCopyFromLiveIn(const SUnit *SU) { |
| 1234 | SDNode *N = SU->Node; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1235 | return N && N->getOpcode() == ISD::CopyFromReg && |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1236 | N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag; |
| 1237 | } |
| 1238 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1239 | namespace { |
| 1240 | template<class SF> |
Chris Lattner | 996795b | 2006-06-28 23:17:24 +0000 | [diff] [blame] | 1241 | class VISIBILITY_HIDDEN RegReductionPriorityQueue |
| 1242 | : public SchedulingPriorityQueue { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1243 | std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue; |
| 1244 | |
| 1245 | public: |
| 1246 | RegReductionPriorityQueue() : |
| 1247 | Queue(SF(this)) {} |
| 1248 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1249 | virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap, |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1250 | std::vector<SUnit> &sunits) {} |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1251 | |
| 1252 | virtual void addNode(const SUnit *SU) {} |
| 1253 | |
| 1254 | virtual void updateNode(const SUnit *SU) {} |
| 1255 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1256 | virtual void releaseState() {} |
| 1257 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1258 | virtual unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1259 | return 0; |
| 1260 | } |
| 1261 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1262 | unsigned size() const { return Queue.size(); } |
| 1263 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1264 | bool empty() const { return Queue.empty(); } |
| 1265 | |
| 1266 | void push(SUnit *U) { |
| 1267 | Queue.push(U); |
| 1268 | } |
| 1269 | void push_all(const std::vector<SUnit *> &Nodes) { |
| 1270 | for (unsigned i = 0, e = Nodes.size(); i != e; ++i) |
| 1271 | Queue.push(Nodes[i]); |
| 1272 | } |
| 1273 | |
| 1274 | SUnit *pop() { |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1275 | if (empty()) return NULL; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1276 | SUnit *V = Queue.top(); |
| 1277 | Queue.pop(); |
| 1278 | return V; |
| 1279 | } |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1280 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1281 | /// remove - This is a really inefficient way to remove a node from a |
| 1282 | /// priority queue. We should roll our own heap to make this better or |
| 1283 | /// something. |
| 1284 | void remove(SUnit *SU) { |
| 1285 | std::vector<SUnit*> Temp; |
| 1286 | |
| 1287 | assert(!Queue.empty() && "Not in queue!"); |
| 1288 | while (Queue.top() != SU) { |
| 1289 | Temp.push_back(Queue.top()); |
| 1290 | Queue.pop(); |
| 1291 | assert(!Queue.empty() && "Not in queue!"); |
| 1292 | } |
| 1293 | |
| 1294 | // Remove the node from the PQ. |
| 1295 | Queue.pop(); |
| 1296 | |
| 1297 | // Add all the other nodes back. |
| 1298 | for (unsigned i = 0, e = Temp.size(); i != e; ++i) |
| 1299 | Queue.push(Temp[i]); |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1300 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1301 | }; |
| 1302 | |
| 1303 | template<class SF> |
Chris Lattner | 996795b | 2006-06-28 23:17:24 +0000 | [diff] [blame] | 1304 | class VISIBILITY_HIDDEN BURegReductionPriorityQueue |
| 1305 | : public RegReductionPriorityQueue<SF> { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1306 | // SUnitMap - SDNode to SUnit mapping (n -> n).
| 1307 | DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1308 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1309 | // SUnits - The SUnits for the current graph. |
| 1310 | const std::vector<SUnit> *SUnits; |
| 1311 | |
| 1312 | // SethiUllmanNumbers - The SethiUllman number for each node. |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1313 | std::vector<unsigned> SethiUllmanNumbers; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1314 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1315 | const TargetInstrInfo *TII; |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1316 | const TargetRegisterInfo *TRI; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1317 | ScheduleDAGRRList *scheduleDAG; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1318 | public: |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1319 | explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii, |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1320 | const TargetRegisterInfo *tri) |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1321 | : TII(tii), TRI(tri), scheduleDAG(NULL) {} |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1322 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1323 | void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap, |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1324 | std::vector<SUnit> &sunits) { |
| 1325 | SUnitMap = &sumap; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1326 | SUnits = &sunits; |
| 1327 | // Add pseudo dependency edges for two-address nodes. |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 1328 | AddPseudoTwoAddrDeps(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1329 | // Calculate node priorities. |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1330 | CalculateSethiUllmanNumbers(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1331 | } |
| 1332 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1333 | void addNode(const SUnit *SU) { |
| 1334 | SethiUllmanNumbers.resize(SUnits->size(), 0); |
| 1335 | CalcNodeSethiUllmanNumber(SU); |
| 1336 | } |
| 1337 | |
| 1338 | void updateNode(const SUnit *SU) { |
| 1339 | SethiUllmanNumbers[SU->NodeNum] = 0; |
| 1340 | CalcNodeSethiUllmanNumber(SU); |
| 1341 | } |
| 1342 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1343 | void releaseState() { |
| 1344 | SUnits = 0; |
| 1345 | SethiUllmanNumbers.clear(); |
| 1346 | } |
| 1347 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1348 | unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1349 | assert(SU->NodeNum < SethiUllmanNumbers.size()); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1350 | unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0; |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1351 | if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU)) |
| 1352 | // CopyFromReg should be close to its def because it restricts |
| 1353 | // allocation choices. But if it is a livein then perhaps we want it |
| 1354 | // closer to its uses so it can be coalesced. |
| 1355 | return 0xffff; |
| 1356 | else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
| 1357 | // CopyToReg should be close to its uses to facilitate coalescing and |
| 1358 | // avoid spilling. |
| 1359 | return 0; |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1360 | else if (Opc == TargetInstrInfo::EXTRACT_SUBREG || |
| 1361 | Opc == TargetInstrInfo::INSERT_SUBREG) |
| 1362 | // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
| 1363 | // facilitate coalescing. |
| 1364 | return 0; |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1365 | else if (SU->NumSuccs == 0) |
| 1366 | // If SU does not have a use, i.e. it doesn't produce a value that would |
| 1367 | // be consumed (e.g. store), then it terminates a chain of computation. |
| 1368 | // Give it a large SethiUllman number so it will be scheduled right |
| 1369 | // before its predecessors, so that it doesn't lengthen their live ranges.
| 1370 | return 0xffff; |
| 1371 | else if (SU->NumPreds == 0) |
| 1372 | // If SU does not have a def, schedule it close to its uses because it |
| 1373 | // does not lengthen any live ranges. |
| 1374 | return 0; |
| 1375 | else |
| 1376 | return SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1377 | } |
| 1378 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1379 | void setScheduleDAG(ScheduleDAGRRList *scheduleDag) { |
| 1380 | scheduleDAG = scheduleDag; |
| 1381 | } |
| 1382 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1383 | private: |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1384 | bool canClobber(const SUnit *SU, const SUnit *Op); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1385 | void AddPseudoTwoAddrDeps(); |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1386 | void CalculateSethiUllmanNumbers(); |
| 1387 | unsigned CalcNodeSethiUllmanNumber(const SUnit *SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1388 | }; |
| 1389 | |
| 1390 | |
| 1391 | template<class SF> |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1392 | class VISIBILITY_HIDDEN TDRegReductionPriorityQueue |
| 1393 | : public RegReductionPriorityQueue<SF> { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1394 | // SUnitMap - SDNode to SUnit mapping (n -> n).
| 1395 | DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1396 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1397 | // SUnits - The SUnits for the current graph. |
| 1398 | const std::vector<SUnit> *SUnits; |
| 1399 | |
| 1400 | // SethiUllmanNumbers - The SethiUllman number for each node. |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1401 | std::vector<unsigned> SethiUllmanNumbers; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1402 | |
| 1403 | public: |
| 1404 | TDRegReductionPriorityQueue() {} |
| 1405 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1406 | void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap, |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1407 | std::vector<SUnit> &sunits) { |
| 1408 | SUnitMap = &sumap; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1409 | SUnits = &sunits; |
| 1410 | // Calculate node priorities. |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1411 | CalculateSethiUllmanNumbers(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1412 | } |
| 1413 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1414 | void addNode(const SUnit *SU) { |
| 1415 | SethiUllmanNumbers.resize(SUnits->size(), 0); |
| 1416 | CalcNodeSethiUllmanNumber(SU); |
| 1417 | } |
| 1418 | |
| 1419 | void updateNode(const SUnit *SU) { |
| 1420 | SethiUllmanNumbers[SU->NodeNum] = 0; |
| 1421 | CalcNodeSethiUllmanNumber(SU); |
| 1422 | } |
| 1423 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1424 | void releaseState() { |
| 1425 | SUnits = 0; |
| 1426 | SethiUllmanNumbers.clear(); |
| 1427 | } |
| 1428 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1429 | unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1430 | assert(SU->NodeNum < SethiUllmanNumbers.size()); |
| 1431 | return SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1432 | } |
| 1433 | |
| 1434 | private: |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1435 | void CalculateSethiUllmanNumbers(); |
| 1436 | unsigned CalcNodeSethiUllmanNumber(const SUnit *SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1437 | }; |
| 1438 | } |
| 1439 | |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1440 | /// closestSucc - Returns the scheduled cycle of the successor which is |
| 1441 | /// closest to the current cycle.
Evan Cheng | 2874855 | 2007-03-13 23:25:11 +0000 | [diff] [blame] | 1442 | static unsigned closestSucc(const SUnit *SU) { |
| 1443 | unsigned MaxCycle = 0; |
| 1444 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1445 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1446 | unsigned Cycle = I->Dep->Cycle; |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1447 | // If there are a bunch of CopyToRegs stacked up, they should be considered
| 1448 | // to be at the same position. |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1449 | if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg) |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1450 | Cycle = closestSucc(I->Dep)+1; |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1451 | if (Cycle > MaxCycle) |
| 1452 | MaxCycle = Cycle; |
| 1453 | } |
Evan Cheng | 2874855 | 2007-03-13 23:25:11 +0000 | [diff] [blame] | 1454 | return MaxCycle; |
| 1455 | } |
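// A small worked example (illustrative only): if one successor of SU was
// scheduled at cycle 3 and another is a CopyToReg whose own uses sit at
// cycle 9, the recursion above reports 10 for the CopyToReg, so closestSucc
// returns 10; a stack of CopyToRegs doesn't hide the distance to the real
// consumer.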
| 1456 | |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1457 | /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
| 1458 | /// for scratch registers. Live-in operands and live-out results don't count |
| 1459 | /// since they are "fixed". |
| 1460 | static unsigned calcMaxScratches(const SUnit *SU) { |
| 1461 | unsigned Scratches = 0; |
| 1462 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1463 | I != E; ++I) { |
| 1464 | if (I->isCtrl) continue; // ignore chain preds |
Evan Cheng | 0e400d4 | 2008-01-09 23:01:55 +0000 | [diff] [blame] | 1465 | if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg) |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1466 | Scratches++; |
| 1467 | } |
| 1468 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1469 | I != E; ++I) { |
| 1470 | if (I->isCtrl) continue; // ignore chain succs |
Evan Cheng | 0e400d4 | 2008-01-09 23:01:55 +0000 | [diff] [blame] | 1471 | if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg) |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1472 | Scratches += 10; |
| 1473 | } |
| 1474 | return Scratches; |
| 1475 | } |
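// Worked example (illustrative, using the weights coded above): a node whose
// two operands are computed inside the block (not CopyFromReg) and whose
// result feeds two in-block users (not CopyToReg) scores 1 + 1 + 10 + 10 = 22.
// A node fed only by live-ins whose single use is a CopyToReg scores 0, since
// it needs no scratch registers of its own.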
| 1476 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1477 | // Bottom up |
| 1478 | bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const { |
David Greene | 4c1e6f3 | 2007-06-29 03:42:23 +0000 | [diff] [blame] | 1479 | // There used to be a special tie breaker here that looked for |
David Greene | 5b6f755 | 2007-06-29 02:48:09 +0000 | [diff] [blame] | 1480 | // two-address instructions and preferred the instruction with a |
| 1481 | // def&use operand. The special case triggered diagnostics when |
| 1482 | // _GLIBCXX_DEBUG was enabled because it broke the strict weak |
| 1483 | // ordering that priority_queue requires. It didn't help much anyway |
| 1484 | // because AddPseudoTwoAddrDeps already covers many of the cases |
| 1485 | // where it would have applied. In addition, it's counter-intuitive |
| 1486 | // that a tie breaker would be the first thing attempted. There's a |
| 1487 | // "real" tie breaker below that is the operation of last resort. |
| 1488 | // The fact that the "special tie breaker" would trigger when there |
| 1489 | // wasn't otherwise a tie is what broke the strict weak ordering |
| 1490 | // constraint. |
Evan Cheng | 99f2f79 | 2006-05-13 08:22:24 +0000 | [diff] [blame] | 1491 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1492 | unsigned LPriority = SPQ->getNodePriority(left); |
| 1493 | unsigned RPriority = SPQ->getNodePriority(right); |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1494 | if (LPriority != RPriority) |
| 1495 | return LPriority > RPriority; |
| 1496 | |
| 1497 | // Try schedule def + use closer when Sethi-Ullman numbers are the same. |
| 1498 | // e.g. |
| 1499 | // t1 = op t2, c1 |
| 1500 | // t3 = op t4, c2 |
| 1501 | // |
| 1502 | // and the following instructions are both ready. |
| 1503 | // t2 = op c3 |
| 1504 | // t4 = op c4 |
| 1505 | // |
| 1506 | // Then schedule t2 = op first. |
| 1507 | // i.e. |
| 1508 | // t4 = op c4 |
| 1509 | // t2 = op c3 |
| 1510 | // t1 = op t2, c1 |
| 1511 | // t3 = op t4, c2 |
| 1512 | // |
| 1513 | // This creates more short live intervals. |
| 1514 | unsigned LDist = closestSucc(left); |
| 1515 | unsigned RDist = closestSucc(right); |
| 1516 | if (LDist != RDist) |
| 1517 | return LDist < RDist; |
| 1518 | |
| 1519 | // Intuitively, it's good to push down instructions whose results are |
| 1520 | // liveout so their long live ranges won't conflict with other values |
| 1521 | // which are needed inside the BB. Further prioritize liveout instructions |
| 1522 | // by the number of operands which are calculated within the BB. |
| 1523 | unsigned LScratch = calcMaxScratches(left); |
| 1524 | unsigned RScratch = calcMaxScratches(right); |
| 1525 | if (LScratch != RScratch) |
| 1526 | return LScratch > RScratch; |
| 1527 | |
| 1528 | if (left->Height != right->Height) |
| 1529 | return left->Height > right->Height; |
| 1530 | |
| 1531 | if (left->Depth != right->Depth) |
| 1532 | return left->Depth < right->Depth; |
| 1533 | |
| 1534 | if (left->CycleBound != right->CycleBound) |
| 1535 | return left->CycleBound > right->CycleBound; |
| 1536 | |
| 1537 | // FIXME: No strict ordering. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1538 | return false; |
| 1539 | } |
| 1540 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1541 | template<class SF> bool |
| 1542 | BURegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1543 | if (SU->isTwoAddress) { |
| 1544 | unsigned Opc = SU->Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 1545 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1546 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 1547 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1548 | for (unsigned i = 0; i != NumOps; ++i) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1549 | if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1550 | SDNode *DU = SU->Node->getOperand(i).Val; |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 1551 | if ((*SUnitMap).find(DU) != (*SUnitMap).end() && |
| 1552 | Op == (*SUnitMap)[DU][SU->InstanceNo]) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1553 | return true; |
| 1554 | } |
| 1555 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1556 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1557 | return false; |
| 1558 | } |
| 1559 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1560 | |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1561 | /// hasCopyToRegUse - Return true if SU has a value successor that is a |
| 1562 | /// CopyToReg node. |
| 1563 | static bool hasCopyToRegUse(SUnit *SU) { |
| 1564 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1565 | I != E; ++I) { |
| 1566 | if (I->isCtrl) continue; |
| 1567 | SUnit *SuccSU = I->Dep; |
| 1568 | if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg) |
| 1569 | return true; |
| 1570 | } |
| 1571 | return false; |
| 1572 | } |
| 1573 | |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1574 | /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's |
| 1575 | /// physical register defs.
| 1576 | static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU, |
| 1577 | const TargetInstrInfo *TII, |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1578 | const TargetRegisterInfo *TRI) { |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1579 | SDNode *N = SuccSU->Node; |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 1580 | unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs(); |
| 1581 | const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs(); |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1582 | if (!ImpDefs) |
| 1583 | return false; |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 1584 | const unsigned *SUImpDefs = |
| 1585 | TII->get(SU->Node->getTargetOpcode()).getImplicitDefs(); |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1586 | if (!SUImpDefs) |
| 1587 | return false; |
| 1588 | for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { |
| 1589 | MVT::ValueType VT = N->getValueType(i); |
| 1590 | if (VT == MVT::Flag || VT == MVT::Other) |
| 1591 | continue; |
| 1592 | unsigned Reg = ImpDefs[i - NumDefs]; |
| 1593 | for (;*SUImpDefs; ++SUImpDefs) { |
| 1594 | unsigned SUReg = *SUImpDefs; |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1595 | if (TRI->regsOverlap(Reg, SUReg)) |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1596 | return true; |
| 1597 | } |
| 1598 | } |
| 1599 | return false; |
| 1600 | } |
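// For instance (a hedged example): on a target where many arithmetic nodes
// implicitly define a condition-code register (such as EFLAGS on X86), two
// such nodes have overlapping implicit defs, so regsOverlap returns true here
// and AddPseudoTwoAddrDeps below skips adding a pseudo edge rather than risk
// scheduling one node across the other's flag def.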
| 1601 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1602 | /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses |
| 1603 | /// it as a def&use operand, add a pseudo control edge from it to the other
| 1604 | /// node (if it won't create a cycle) so the two-address one will be scheduled |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1605 | /// first (lower in the schedule). If both nodes are two-address, favor the |
| 1606 | /// one that has a CopyToReg use (more likely to be a loop induction update). |
| 1607 | /// If both are two-address, but one is commutable while the other is not |
| 1608 | /// commutable, favor the one that's not commutable. |
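/// For example (an illustrative sketch with made-up opcodes): if "a = add2 v, c"
/// is tied so that its result clobbers v, and "b = sub v, d" also reads v,
/// the pseudo edge added below makes b a predecessor of the add. Bottom-up,
/// the add is popped first and lands lower in the final schedule, so b still
/// reads v before it is clobbered and no copy of v is needed.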
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1609 | template<class SF> |
| 1610 | void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1611 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) { |
| 1612 | SUnit *SU = (SUnit *)&((*SUnits)[i]); |
| 1613 | if (!SU->isTwoAddress) |
| 1614 | continue; |
| 1615 | |
| 1616 | SDNode *Node = SU->Node; |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1617 | if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1618 | continue; |
| 1619 | |
| 1620 | unsigned Opc = Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 1621 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1622 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 1623 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1624 | for (unsigned j = 0; j != NumOps; ++j) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1625 | if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1626 | SDNode *DU = SU->Node->getOperand(j).Val; |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 1627 | if ((*SUnitMap).find(DU) == (*SUnitMap).end()) |
| 1628 | continue; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1629 | SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo]; |
Evan Cheng | f24d15f | 2006-11-06 21:33:46 +0000 | [diff] [blame] | 1630 | if (!DUSU) continue; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1631 | for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end(); |
| 1632 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1633 | if (I->isCtrl) continue; |
| 1634 | SUnit *SuccSU = I->Dep; |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1635 | if (SuccSU == SU) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1636 | continue; |
Evan Cheng | 2dbffa4 | 2007-11-06 08:44:59 +0000 | [diff] [blame] | 1637 | // Be conservative. Ignore if nodes aren't at roughly the same |
| 1638 | // depth and height. |
| 1639 | if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1) |
| 1640 | continue; |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1641 | if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode()) |
| 1642 | continue; |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1643 | // Don't constrain nodes with physical register defs if the |
Dan Gohman | cf8827a | 2008-01-29 12:43:50 +0000 | [diff] [blame] | 1644 | // predecessor can clobber them. |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1645 | if (SuccSU->hasPhysRegDefs) { |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1646 | if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI)) |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1647 | continue; |
| 1648 | } |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1649 | // Don't constrain extract_subreg / insert_subreg; these may be
| 1650 | // coalesced away. We want them close to their uses.
| 1651 | unsigned SuccOpc = SuccSU->Node->getTargetOpcode(); |
| 1652 | if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG || |
| 1653 | SuccOpc == TargetInstrInfo::INSERT_SUBREG) |
| 1654 | continue; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1655 | if ((!canClobber(SuccSU, DUSU) || |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1656 | (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) || |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1657 | (!SU->isCommutable && SuccSU->isCommutable)) && |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1658 | !scheduleDAG->IsReachable(SuccSU, SU)) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1659 | DOUT << "Adding an edge from SU # " << SU->NodeNum |
| 1660 | << " to SU #" << SuccSU->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1661 | scheduleDAG->AddPred(SU, SuccSU, true, true); |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1662 | } |
| 1663 | } |
| 1664 | } |
| 1665 | } |
| 1666 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1667 | } |
| 1668 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1669 | /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1670 | /// Smaller number is the higher priority. |
| 1671 | template<class SF> |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1672 | unsigned BURegReductionPriorityQueue<SF>:: |
| 1673 | CalcNodeSethiUllmanNumber(const SUnit *SU) { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1674 | unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1675 | if (SethiUllmanNumber != 0) |
| 1676 | return SethiUllmanNumber; |
| 1677 | |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1678 | unsigned Extra = 0; |
| 1679 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1680 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1681 | if (I->isCtrl) continue; // ignore chain preds |
| 1682 | SUnit *PredSU = I->Dep; |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1683 | unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU); |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1684 | if (PredSethiUllman > SethiUllmanNumber) { |
| 1685 | SethiUllmanNumber = PredSethiUllman; |
| 1686 | Extra = 0; |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1687 | } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1688 | ++Extra; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1689 | } |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1690 | |
| 1691 | SethiUllmanNumber += Extra; |
| 1692 | |
| 1693 | if (SethiUllmanNumber == 0) |
| 1694 | SethiUllmanNumber = 1; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1695 | |
| 1696 | return SethiUllmanNumber; |
| 1697 | } |
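// A small worked example of the recurrence above (illustrative values): a
// node with no data predecessors gets 1. A node whose two operand subtrees
// both score 1 gets max(1, 1) plus one Extra, i.e. 2, because the second
// equally expensive operand must be kept live while the first is computed.
// A node with operand scores 2 and 1 stays at 2, since the smaller subtree
// fits within the registers already needed for the larger one.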
| 1698 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1699 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
| 1700 | /// scheduling units. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1701 | template<class SF> |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1702 | void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1703 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
| 1704 | |
| 1705 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1706 | CalcNodeSethiUllmanNumber(&(*SUnits)[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1707 | } |
| 1708 | |
| 1709 | static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) { |
| 1710 | unsigned Sum = 0; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1711 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1712 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1713 | SUnit *SuccSU = I->Dep; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1714 | for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(), |
| 1715 | EE = SuccSU->Preds.end(); II != EE; ++II) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1716 | SUnit *PredSU = II->Dep; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1717 | if (!PredSU->isScheduled) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1718 | ++Sum; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1719 | } |
| 1720 | } |
| 1721 | |
| 1722 | return Sum; |
| 1723 | } |
| 1724 | |
| 1725 | |
| 1726 | // Top down |
| 1727 | bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const { |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1728 | unsigned LPriority = SPQ->getNodePriority(left); |
| 1729 | unsigned RPriority = SPQ->getNodePriority(right); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1730 | bool LIsTarget = left->Node && left->Node->isTargetOpcode(); |
| 1731 | bool RIsTarget = right->Node && right->Node->isTargetOpcode(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1732 | bool LIsFloater = LIsTarget && left->NumPreds == 0; |
| 1733 | bool RIsFloater = RIsTarget && right->NumPreds == 0; |
| 1734 | unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0; |
| 1735 | unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0; |
| 1736 | |
| 1737 | if (left->NumSuccs == 0 && right->NumSuccs != 0) |
| 1738 | return false; |
| 1739 | else if (left->NumSuccs != 0 && right->NumSuccs == 0) |
| 1740 | return true; |
| 1741 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1742 | if (LIsFloater) |
| 1743 | LBonus -= 2; |
| 1744 | if (RIsFloater) |
| 1745 | RBonus -= 2; |
| 1746 | if (left->NumSuccs == 1) |
| 1747 | LBonus += 2; |
| 1748 | if (right->NumSuccs == 1) |
| 1749 | RBonus += 2; |
| 1750 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1751 | if (LPriority+LBonus != RPriority+RBonus) |
| 1752 | return LPriority+LBonus < RPriority+RBonus; |
Anton Korobeynikov | 035eaac | 2008-02-20 11:10:28 +0000 | [diff] [blame] | 1753 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1754 | if (left->Depth != right->Depth) |
| 1755 | return left->Depth < right->Depth; |
| 1756 | |
| 1757 | if (left->NumSuccsLeft != right->NumSuccsLeft) |
| 1758 | return left->NumSuccsLeft > right->NumSuccsLeft; |
| 1759 | |
| 1760 | if (left->CycleBound != right->CycleBound) |
| 1761 | return left->CycleBound > right->CycleBound; |
| 1762 | |
| 1763 | // FIXME: No strict ordering. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1764 | return false; |
| 1765 | } |
| 1766 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1767 | /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1768 | /// Smaller number is the higher priority. |
| 1769 | template<class SF> |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1770 | unsigned TDRegReductionPriorityQueue<SF>:: |
| 1771 | CalcNodeSethiUllmanNumber(const SUnit *SU) { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1772 | unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1773 | if (SethiUllmanNumber != 0) |
| 1774 | return SethiUllmanNumber; |
| 1775 | |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1776 | unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1777 | if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1778 | SethiUllmanNumber = 0xffff; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1779 | else if (SU->NumSuccsLeft == 0) |
| 1780 | // If SU does not have a use, i.e. it doesn't produce a value that would |
| 1781 | // be consumed (e.g. store), then it terminates a chain of computation. |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1782 | // Give it a small SethiUllman number so it will be scheduled right before |
| 1783 | // its predecessors, so that it doesn't lengthen their live ranges.
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1784 | SethiUllmanNumber = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1785 | else if (SU->NumPredsLeft == 0 && |
| 1786 | (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU))) |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1787 | SethiUllmanNumber = 0xffff; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1788 | else { |
| 1789 | int Extra = 0; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1790 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1791 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1792 | if (I->isCtrl) continue; // ignore chain preds |
| 1793 | SUnit *PredSU = I->Dep; |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1794 | unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1795 | if (PredSethiUllman > SethiUllmanNumber) { |
| 1796 | SethiUllmanNumber = PredSethiUllman; |
| 1797 | Extra = 0; |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1798 | } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1799 | ++Extra; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1800 | } |
| 1801 | |
| 1802 | SethiUllmanNumber += Extra; |
| 1803 | } |
| 1804 | |
| 1805 | return SethiUllmanNumber; |
| 1806 | } |
| 1807 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1808 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
| 1809 | /// scheduling units. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1810 | template<class SF> |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1811 | void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1812 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
| 1813 | |
| 1814 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1815 | CalcNodeSethiUllmanNumber(&(*SUnits)[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1816 | } |
| 1817 | |
| 1818 | //===----------------------------------------------------------------------===// |
| 1819 | // Public Constructor Functions |
| 1820 | //===----------------------------------------------------------------------===// |
| 1821 | |
Jim Laskey | 03593f7 | 2006-08-01 18:29:48 +0000 | [diff] [blame] | 1822 | llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, |
| 1823 | SelectionDAG *DAG, |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1824 | MachineBasicBlock *BB) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1825 | const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo(); |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1826 | const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo(); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame^] | 1827 | |
| 1828 | BURegReductionPriorityQueue<bu_ls_rr_sort> *priorityQueue = |
| 1829 | new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII, TRI); |
| 1830 | |
| 1831 | ScheduleDAGRRList * scheduleDAG = |
| 1832 | new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, priorityQueue); |
| 1833 | priorityQueue->setScheduleDAG(scheduleDAG); |
| 1834 | return scheduleDAG; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1835 | } |
| 1836 | |
Jim Laskey | 03593f7 | 2006-08-01 18:29:48 +0000 | [diff] [blame] | 1837 | llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, |
| 1838 | SelectionDAG *DAG, |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1839 | MachineBasicBlock *BB) { |
Jim Laskey | 95eda5b | 2006-08-01 14:21:23 +0000 | [diff] [blame] | 1840 | return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1841 | new TDRegReductionPriorityQueue<td_ls_rr_sort>()); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1842 | } |
| 1843 | |