//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
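//
// In outline (a simplified sketch of ListScheduleBottomUp/ListScheduleTopDown
// below, not the exact control flow):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();     // highest priority node
//     if (SU is not ready, e.g. it would clobber a live physical register)
//       set it aside and try the next available node;
//     else
//       schedule SU and release its predecessors (or successors, top-down);
//   }
//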

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include <climits>
#include <queue>
#include "llvm/Support/CommandLine.h"
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumCCCopies,   "Number of cross class copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       " Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       " Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their defining
  /// nodes that are "live".  These nodes must be scheduled before any other
  /// nodes that modify the registers can be scheduled.
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;
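  // (Illustrative note, assuming an x86-like target with an EFLAGS register:
  // once the user of a flags value has been scheduled bottom-up, LiveRegs
  // contains EFLAGS, LiveRegDefs[EFLAGS] points at the compare that must still
  // define it, and LiveRegCycles[EFLAGS] records the cycle the user was
  // scheduled in, so nothing that clobbers EFLAGS is scheduled in between.)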

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(SUnit *SU, SUnit *TargetSU);

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);

  /// AddPred - This adds the specified node X as a predecessor of
  /// the current node Y if it is not already one.
  /// This returns true if X is a new predecessor.
  /// Updates the topological ordering if required.
  bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
               unsigned PhyReg = 0, int Cost = 1);

  /// RemovePred - This removes the specified node N from the predecessors of
  /// the current node M. Updates the topological ordering if required.
  bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);

private:
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*,
                                  SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// Functions for preserving the topological ordering
  /// even after dynamic insertions of new edges.
  /// This allows a very fast implementation of IsReachable.

  /**
    The idea of the algorithm is taken from
    "Online algorithms for managing the topological order of
    a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
    This is the MNR algorithm, which was first introduced by
    A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
    "Maintaining a topological order under edge insertions".

    Short description of the algorithm:

    Topological ordering, ord, of a DAG maps each node to a topological
    index so that for all edges X->Y it is the case that ord(X) < ord(Y).

    This means that if there is a path from the node X to the node Z,
    then ord(X) < ord(Z).

    This property can be used to check for reachability of nodes:
    if Z is reachable from X, then an insertion of the edge Z->X would
    create a cycle.

    The algorithm first computes a topological ordering for the DAG by
    initializing the Index2Node and Node2Index arrays and then tries to keep
    the ordering up-to-date after edge insertions by reordering the DAG.

    On insertion of the edge X->Y, the algorithm first marks by calling DFS
    the nodes reachable from Y, and then shifts them using Shift to lie
    immediately after X in Index2Node.
  */
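
  /*
    Illustrative example (not taken from the papers above): suppose the current
    ordering is ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3 and the edge C->B is
    inserted. Since ord(B) < ord(C), DFS marks the nodes reachable from B whose
    indexes do not exceed ord(C) (here just B itself), and Shift moves them to
    lie immediately after C, giving ord(A)=0, ord(C)=1, ord(B)=2, ord(D)=3,
    which again satisfies ord(X) < ord(Y) for every edge X->Y. Only that window
    of Index2Node is touched, so no full re-sort is needed.
  */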

  /// InitDAGTopologicalSorting - create the initial topological
  /// ordering from the DAG to be scheduled.
  void InitDAGTopologicalSorting();

  /// DFS - make a DFS traversal and mark all nodes affected by the
  /// edge insertion. These nodes will later get new topological indexes
  /// by means of the Shift method.
  void DFS(SUnit *SU, int UpperBound, bool& HasLoop);

  /// Shift - reassign topological indexes for the nodes in the DAG
  /// to preserve the topological ordering.
  void Shift(BitVector& Visited, int LowerBound, int UpperBound);

  /// Allocate - assign the topological index to the node n.
  void Allocate(int n, int index);

  /// Index2Node - Maps topological index to the node number.
  std::vector<int> Index2Node;
  /// Node2Index - Maps the node number to its topological index.
  std::vector<int> Node2Index;
  /// Visited - a set of nodes visited during a DFS traversal.
  BitVector Visited;
};
}  // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  CalculateDepths();
  CalculateHeights();
  InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnitMap, SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  CommuteNodesToReducePressure();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";

  // Emit in scheduled order
  EmitSchedule();
}

/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to an MI.
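/// Illustrative example (hypothetical operands): for a two-address, commutable
/// node such as "t3 = ADD t1, t2" where the result is tied to t1, if t1 is
/// still used by an already-scheduled node below but t2 is not, commuting the
/// operands lets t1's register live on instead of being clobbered, avoiding a
/// copy and reducing register pressure.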
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size()-1; i != 0; --i) {  // Ignore first node.
    SUnit *SU = Sequence[i];
    if (!SU || !SU->Node) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->Node->getTargetOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      unsigned NumRes = TID.getNumDefs();
      unsigned NumOps = TID.getNumOperands() - NumRes;
      for (unsigned j = 0; j != NumOps; ++j) {
        if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->Node->getOperand(j).Val;
        SUnit *OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU. Try to commute SU if no other source operands
          // are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->Node->getOperand(k).Val;
              OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->Node);
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (PredSU->NumSuccsLeft == 0) {
    // EntryToken has to go last!  Special case it here.
    if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) {
      PredSU->isAvailable = true;
      AvailableQueue->push(PredSU);
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  PredSU->CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    PredSU->CycleBound = std::max(PredSU->CycleBound,
                                  I->Dep->Cycle + PredSU->Latency);
  }

  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, updating its
/// state and the states of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}

/// IsReachable - Checks if SU is reachable from TargetSU.
bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

/// Allocate - assign the topological index to the node n.
inline void ScheduleDAGRRList::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
void ScheduleDAGRRList::InitDAGTopologicalSorting() {
  unsigned DAGSize = SUnits.size();
  std::vector<unsigned> InDegree(DAGSize);
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);
  std::vector<SUnit*> TopOrder;
  TopOrder.reserve(DAGSize);

  // Initialize the data structures.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    InDegree[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    TopOrder.push_back(SU);
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      SUnit *SU = I->Dep;
      if (!--InDegree[SU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(SU);
    }
  }

  // Second pass, assign the actual topological order as node ids.
  int Id = 0;

  Index2Node.clear();
  Node2Index.clear();
  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);
  Visited.resize(DAGSize);

  for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
       TE = TopOrder.rend(); TI != TE; ++TI) {
    Allocate((*TI)->NodeNum, Id);
    Id++;
  }

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

/// AddPred - adds an edge from SUnit X to SUnit Y.
/// Updates the topological ordering if required.
bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
                                unsigned PhyReg, int Cost) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(X) < Ord(Y) ?
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
  // Now really insert the edge.
  return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
}

/// RemovePred - This removes the specified node N from the predecessors of
/// the current node M. Updates the topological ordering if required.
bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
                                   bool isCtrl, bool isSpecial) {
  // InitDAGTopologicalSorting();
  return M->removePred(N, isCtrl, isSpecial);
}

/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
  std::vector<SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  while (!WorkList.empty()) {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      int s = SU->Succs[I].Dep->NodeNum;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SU->Succs[I].Dep);
      }
    }
  }
}

/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
                              int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned j = 0; j < L.size(); ++j) {
    Allocate(L[j], i - shift);
    i = i + 1;
  }
}


/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
  if (IsReachable(TargetSU, SU))
    return true;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
      return true;
  return false;
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node, unscheduling the nodes
/// emitted since that cycle (including, possibly, successors of SU).
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  ++NumBacktracks;
}


/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->FlaggedNodes.size())
    return NULL;

  SDNode *N = SU->Node;
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT::ValueType VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDOperand &Op = N->getOperand(i);
    MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
    if (VT == MVT::Flag)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 4> NewNodes;
    if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->Node->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
    DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
                                  SDOperand(LoadNode, 1));

    SUnit *NewSU = CreateNewSUnit(N);
    SUnitMap[N].push_back(NewSU);
    const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    // FIXME: Calculate height / depth and propagate the changes?
    NewSU->Depth = SU->Depth;
    NewSU->Height = SU->Height;
    ComputeLatency(NewSU);

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    DenseMap<SDNode*, std::vector<SUnit*> >::iterator SMI =
      SUnitMap.find(LoadNode);
    if (SMI != SUnitMap.end()) {
      LoadSU = SMI->second.front();
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      SUnitMap[LoadNode].push_back(LoadSU);

      LoadSU->Depth = SU->Depth;
      LoadSU->Height = SU->Height;
      ComputeLatency(LoadSU);
    }

    SUnit *ChainPred = NULL;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainPred = I->Dep;
      else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
        LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
      else
        NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                  I->isCtrl, I->isSpecial));
      else
        NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                 I->isCtrl, I->isSpecial));
    }

    RemovePred(SU, ChainPred, true, false);
    if (isNewLoad) {
      AddPred(LoadSU, ChainPred, true, false);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      SDep *Pred = &LoadPreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      if (isNewLoad) {
        AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
                Pred->Reg, Pred->Cost);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      SDep *Pred = &NodePreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
              Pred->Reg, Pred->Cost);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep *Succ = &NodeSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
              Succ->Reg, Succ->Cost);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep *Succ = &ChainSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      if (isNewLoad) {
        AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
                Succ->Reg, Succ->Cost);
      }
    }
    if (isNewLoad) {
      AddPred(NewSU, LoadSU, false, false);
    }

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
/// and move all scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AddPred(CopyFromSU, SU, false, false, Reg, -1);
  AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumCCCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                            const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies. If the specified node is the last one that's available to
/// schedule, do whatever is necessary (i.e. backtracking or cloning) to make
/// it possible.
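/// (Illustrative, target-specific example with hypothetical registers: on an
/// x86-like target, if EFLAGS is live because a scheduled user still needs the
/// compare that defines it, and this node would also clobber EFLAGS, then
/// EFLAGS is returned in LRegs and the caller must backtrack, duplicate the
/// defining node, or insert cross-class copies before this node can be
/// scheduled.)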
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs) {
  if (LiveRegs.empty())
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      unsigned Reg = I->Reg;
      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
        if (RegAdded.insert(Reg))
          LRegs.push_back(Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }

  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
    if (!Node || !Node->isTargetOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
        if (RegAdded.insert(*Reg))
          LRegs.push_back(*Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }
  return !LRegs.empty();
}

Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 938 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 939 | /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up |
| 940 | /// schedulers. |
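| | /// In outline: repeatedly pop the highest priority node from |
| | /// AvailableQueue; if its cycle bound or a live physical register |
| | /// dependency prevents scheduling it now, set it aside and try the next |
| | /// one; after a node is scheduled, advance the cycle and re-queue the |
| | /// deferred nodes that are still available. |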
| 941 | void ScheduleDAGRRList::ListScheduleBottomUp() { |
| 942 | unsigned CurCycle = 0; |
| 943 | // Add root to Available queue. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 944 | SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front(); |
| 945 | RootSU->isAvailable = true; |
| 946 | AvailableQueue->push(RootSU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 947 | |
| 948 | // While Available queue is not empty, grab the node with the highest |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 949 | // priority. If it is not ready, put it back. Schedule the node. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 950 | SmallVector<SUnit*, 4> NotReady; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 951 | while (!AvailableQueue->empty()) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 952 | bool Delayed = false; |
| 953 | DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 954 | SUnit *CurSU = AvailableQueue->pop(); |
| 955 | while (CurSU) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 956 | if (CurSU->CycleBound <= CurCycle) { |
| 957 | SmallVector<unsigned, 4> LRegs; |
| 958 | if (!DelayForLiveRegsBottomUp(CurSU, LRegs)) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 959 | break; |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 960 | Delayed = true; |
| 961 | LRegsMap.insert(std::make_pair(CurSU, LRegs)); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 962 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 963 | |
| 964 | CurSU->isPending = true; // This SU is not in AvailableQueue right now. |
| 965 | NotReady.push_back(CurSU); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 966 | CurSU = AvailableQueue->pop(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 967 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 968 | |
| 969 | // All candidates are delayed due to live physical reg dependencies. |
| 970 | // Try backtracking, code duplication, or inserting cross class copies |
| 971 | // to resolve it. |
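| | // Hypothetical sketch (the SU numbers and register are made up): if SU #5 |
| | // is delayed because SU #9 still holds a live flags register, first try |
| | // to unschedule back to the cycle where that value became live |
| | // (BacktrackBottomUp); failing that, clone the defining node so each user |
| | // gets its own def (CopyAndMoveSuccessors); as a last resort, copy the |
| | // value through another register class (InsertCCCopiesAndMoveSuccs). |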
| 972 | if (Delayed && !CurSU) { |
| 973 | for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { |
| 974 | SUnit *TrySU = NotReady[i]; |
| 975 | SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU]; |
| 976 | |
| 977 | // Try unscheduling up to the point where it's safe to schedule |
| 978 | // this node. |
| 979 | unsigned LiveCycle = CurCycle; |
| 980 | for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) { |
| 981 | unsigned Reg = LRegs[j]; |
| 982 | unsigned LCycle = LiveRegCycles[Reg]; |
| 983 | LiveCycle = std::min(LiveCycle, LCycle); |
| 984 | } |
| 985 | SUnit *OldSU = Sequence[LiveCycle]; |
| 986 | if (!WillCreateCycle(TrySU, OldSU)) { |
| 987 | BacktrackBottomUp(TrySU, LiveCycle, CurCycle); |
| 988 | // Force the current node to be scheduled before the node that |
| 989 | // requires the physical reg dep. |
| 990 | if (OldSU->isAvailable) { |
| 991 | OldSU->isAvailable = false; |
| 992 | AvailableQueue->remove(OldSU); |
| 993 | } |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 994 | AddPred(TrySU, OldSU, true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 995 | // If one or more successors have been unscheduled, then the current |
| 996 | // node is no longer available. Schedule a successor that's now |
| 997 | // available instead. |
| 998 | if (!TrySU->isAvailable) |
| 999 | CurSU = AvailableQueue->pop(); |
| 1000 | else { |
| 1001 | CurSU = TrySU; |
| 1002 | TrySU->isPending = false; |
| 1003 | NotReady.erase(NotReady.begin()+i); |
| 1004 | } |
| 1005 | break; |
| 1006 | } |
| 1007 | } |
| 1008 | |
| 1009 | if (!CurSU) { |
Dan Gohman | fd227e9 | 2008-03-25 17:10:29 +0000 | [diff] [blame] | 1010 | // Can't backtrack. Try duplicating the nodes that produce these |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1011 | // "expensive to copy" values to break the dependency. In case even |
| 1012 | // that doesn't work, insert cross class copies. |
| 1013 | SUnit *TrySU = NotReady[0]; |
| 1014 | SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU]; |
| 1015 | assert(LRegs.size() == 1 && "Can't handle this yet!"); |
| 1016 | unsigned Reg = LRegs[0]; |
| 1017 | SUnit *LRDef = LiveRegDefs[Reg]; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 1018 | SUnit *NewDef = CopyAndMoveSuccessors(LRDef); |
| 1019 | if (!NewDef) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1020 | // Issue expensive cross register class copies. |
| 1021 | MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII); |
| 1022 | const TargetRegisterClass *RC = |
Evan Cheng | e88a625 | 2008-03-11 07:19:34 +0000 | [diff] [blame] | 1023 | TRI->getPhysicalRegisterRegClass(Reg, VT); |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1024 | const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1025 | if (!DestRC) { |
| 1026 | assert(false && "Don't know how to copy this physical register!"); |
| 1027 | abort(); |
| 1028 | } |
| 1029 | SmallVector<SUnit*, 2> Copies; |
| 1030 | InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); |
| 1031 | DOUT << "Adding an edge from SU # " << TrySU->NodeNum |
| 1032 | << " to SU #" << Copies.front()->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1033 | AddPred(TrySU, Copies.front(), true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1034 | NewDef = Copies.back(); |
| 1035 | } |
| 1036 | |
| 1037 | DOUT << "Adding an edge from SU # " << NewDef->NodeNum |
| 1038 | << " to SU #" << TrySU->NodeNum << "\n"; |
| 1039 | LiveRegDefs[Reg] = NewDef; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1040 | AddPred(NewDef, TrySU, true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1041 | TrySU->isAvailable = false; |
| 1042 | CurSU = NewDef; |
| 1043 | } |
| 1044 | |
| 1045 | if (!CurSU) { |
| 1046 | assert(false && "Unable to resolve live physical register dependencies!"); |
| 1047 | abort(); |
| 1048 | } |
| 1049 | } |
| 1050 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1051 | // Add the nodes that aren't ready back onto the available list. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1052 | for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { |
| 1053 | NotReady[i]->isPending = false; |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1054 | // May no longer be available due to backtracking. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1055 | if (NotReady[i]->isAvailable) |
| 1056 | AvailableQueue->push(NotReady[i]); |
| 1057 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1058 | NotReady.clear(); |
| 1059 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1060 | if (!CurSU) |
| 1061 | Sequence.push_back(0); |
| 1062 | else { |
| 1063 | ScheduleNodeBottomUp(CurSU, CurCycle); |
| 1064 | Sequence.push_back(CurSU); |
| 1065 | } |
| 1066 | ++CurCycle; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1067 | } |
| 1068 | |
| 1069 | // Add entry node last |
| 1070 | if (DAG.getEntryNode().Val != DAG.getRoot().Val) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1071 | SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1072 | Sequence.push_back(Entry); |
| 1073 | } |
| 1074 | |
| 1075 | // Reverse the order if it is bottom up. |
| 1076 | std::reverse(Sequence.begin(), Sequence.end()); |
| 1077 | |
| 1078 | |
| 1079 | #ifndef NDEBUG |
| 1080 | // Verify that all SUnits were scheduled. |
| 1081 | bool AnyNotSched = false; |
| 1082 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1083 | if (SUnits[i].NumSuccsLeft != 0) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1084 | if (!AnyNotSched) |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1085 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1086 | SUnits[i].dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1087 | cerr << "has not been scheduled!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1088 | AnyNotSched = true; |
| 1089 | } |
| 1090 | } |
| 1091 | assert(!AnyNotSched); |
| 1092 | #endif |
| 1093 | } |
| 1094 | |
| 1095 | //===----------------------------------------------------------------------===// |
| 1096 | // Top-Down Scheduling |
| 1097 | //===----------------------------------------------------------------------===// |
| 1098 | |
| 1099 | /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1100 | /// the AvailableQueue if the count reaches zero. Also update its cycle bound. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1101 | void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain, |
| 1102 | unsigned CurCycle) { |
| 1103 | // FIXME: the distance between two nodes is not always == the predecessor's |
| 1104 | // latency. For example, the reader can very well read the register written |
| 1105 | // by the predecessor later than the issue cycle. It also depends on the |
| 1106 | // interrupt model (drain vs. freeze). |
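| | // For example, scheduling SU at CurCycle 5 when SuccSU->Latency is 2 |
| | // raises SuccSU->CycleBound to at least 7. |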
| 1107 | SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency); |
| 1108 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1109 | --SuccSU->NumPredsLeft; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1110 | |
| 1111 | #ifndef NDEBUG |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1112 | if (SuccSU->NumPredsLeft < 0) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1113 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1114 | SuccSU->dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1115 | cerr << " has been released too many times!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1116 | assert(0); |
| 1117 | } |
| 1118 | #endif |
| 1119 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1120 | if (SuccSU->NumPredsLeft == 0) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1121 | SuccSU->isAvailable = true; |
| 1122 | AvailableQueue->push(SuccSU); |
| 1123 | } |
| 1124 | } |
| 1125 | |
| 1126 | |
| 1127 | /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending |
| 1128 | /// count of its successors. If a successor pending count is zero, add it to |
| 1129 | /// the Available queue. |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1130 | void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1131 | DOUT << "*** Scheduling [" << CurCycle << "]: "; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1132 | DEBUG(SU->dump(&DAG)); |
| 1133 | SU->Cycle = CurCycle; |
| 1134 | |
| 1135 | AvailableQueue->ScheduledNode(SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1136 | |
| 1137 | // Top down: release successors |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1138 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1139 | I != E; ++I) |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1140 | ReleaseSucc(I->Dep, I->isCtrl, CurCycle); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1141 | SU->isScheduled = true; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1142 | } |
| 1143 | |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1144 | /// ListScheduleTopDown - The main loop of list scheduling for top-down |
| 1145 | /// schedulers. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1146 | void ScheduleDAGRRList::ListScheduleTopDown() { |
| 1147 | unsigned CurCycle = 0; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1148 | SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1149 | |
| 1150 | // All leaves to Available queue. |
| 1151 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
| 1152 | // It is available if it has no predecessors. |
Dan Gohman | 70de4cb | 2008-01-29 13:02:09 +0000 | [diff] [blame] | 1153 | if (SUnits[i].Preds.empty() && &SUnits[i] != Entry) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1154 | AvailableQueue->push(&SUnits[i]); |
| 1155 | SUnits[i].isAvailable = true; |
| 1156 | } |
| 1157 | } |
| 1158 | |
| 1159 | // Emit the entry node first. |
| 1160 | ScheduleNodeTopDown(Entry, CurCycle); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1161 | Sequence.push_back(Entry); |
| 1162 | ++CurCycle; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1163 | |
| 1164 | // While Available queue is not empty, grab the node with the highest |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1165 | // priority. If it is not ready, put it back. Schedule the node. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1166 | std::vector<SUnit*> NotReady; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1167 | while (!AvailableQueue->empty()) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1168 | SUnit *CurSU = AvailableQueue->pop(); |
| 1169 | while (CurSU && CurSU->CycleBound > CurCycle) { |
| 1170 | NotReady.push_back(CurSU); |
| 1171 | CurSU = AvailableQueue->pop(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1172 | } |
| 1173 | |
| 1174 | // Add the nodes that aren't ready back onto the available list. |
| 1175 | AvailableQueue->push_all(NotReady); |
| 1176 | NotReady.clear(); |
| 1177 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1178 | if (!CurSU) |
| 1179 | Sequence.push_back(0); |
| 1180 | else { |
| 1181 | ScheduleNodeTopDown(CurSU, CurCycle); |
| 1182 | Sequence.push_back(CurSU); |
| 1183 | } |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1184 | CurCycle++; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1185 | } |
| 1186 | |
| 1187 | |
| 1188 | #ifndef NDEBUG |
| 1189 | // Verify that all SUnits were scheduled. |
| 1190 | bool AnyNotSched = false; |
| 1191 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
| 1192 | if (!SUnits[i].isScheduled) { |
| 1193 | if (!AnyNotSched) |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1194 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1195 | SUnits[i].dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1196 | cerr << "has not been scheduled!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1197 | AnyNotSched = true; |
| 1198 | } |
| 1199 | } |
| 1200 | assert(!AnyNotSched); |
| 1201 | #endif |
| 1202 | } |
| 1203 | |
| 1204 | |
| 1205 | |
| 1206 | //===----------------------------------------------------------------------===// |
| 1207 | // RegReductionPriorityQueue Implementation |
| 1208 | //===----------------------------------------------------------------------===// |
| 1209 | // |
| 1210 | // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers |
| 1211 | // to reduce register pressure. |
| 1212 | // |
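| | // As a reminder of the classic labeling this is derived from (the |
| | // expression below is illustrative, not taken from this file): a leaf gets |
| | // number 1; an interior node gets the larger of its operands' numbers when |
| | // they differ, and that number plus one when they are equal. For |
| | // (a + b) * (c + d) the leaves are 1, each add is 2, and the multiply is 3, |
| | // i.e. three registers suffice. The queues below compute a per-SUnit |
| | // variant of these numbers. |
| | // |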
| 1213 | namespace { |
| 1214 | template<class SF> |
| 1215 | class RegReductionPriorityQueue; |
| 1216 | |
| 1217 | /// Sorting functions for the Available queue. |
| 1218 | struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> { |
| 1219 | RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ; |
| 1220 | bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {} |
| 1221 | bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {} |
| 1222 | |
| 1223 | bool operator()(const SUnit* left, const SUnit* right) const; |
| 1224 | }; |
| 1225 | |
| 1226 | struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> { |
| 1227 | RegReductionPriorityQueue<td_ls_rr_sort> *SPQ; |
| 1228 | td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {} |
| 1229 | td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {} |
| 1230 | |
| 1231 | bool operator()(const SUnit* left, const SUnit* right) const; |
| 1232 | }; |
| 1233 | } // end anonymous namespace |
| 1234 | |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1235 | static inline bool isCopyFromLiveIn(const SUnit *SU) { |
| 1236 | SDNode *N = SU->Node; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1237 | return N && N->getOpcode() == ISD::CopyFromReg && |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1238 | N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag; |
| 1239 | } |
| 1240 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1241 | namespace { |
| 1242 | template<class SF> |
Chris Lattner | 996795b | 2006-06-28 23:17:24 +0000 | [diff] [blame] | 1243 | class VISIBILITY_HIDDEN RegReductionPriorityQueue |
| 1244 | : public SchedulingPriorityQueue { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1245 | std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue; |
| 1246 | |
| 1247 | public: |
| 1248 | RegReductionPriorityQueue() : |
| 1249 | Queue(SF(this)) {} |
| 1250 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1251 | virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap, |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1252 | std::vector<SUnit> &sunits) {} |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1253 | |
| 1254 | virtual void addNode(const SUnit *SU) {} |
| 1255 | |
| 1256 | virtual void updateNode(const SUnit *SU) {} |
| 1257 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1258 | virtual void releaseState() {} |
| 1259 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1260 | virtual unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1261 | return 0; |
| 1262 | } |
| 1263 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1264 | unsigned size() const { return Queue.size(); } |
| 1265 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1266 | bool empty() const { return Queue.empty(); } |
| 1267 | |
| 1268 | void push(SUnit *U) { |
| 1269 | Queue.push(U); |
| 1270 | } |
| 1271 | void push_all(const std::vector<SUnit *> &Nodes) { |
| 1272 | for (unsigned i = 0, e = Nodes.size(); i != e; ++i) |
| 1273 | Queue.push(Nodes[i]); |
| 1274 | } |
| 1275 | |
| 1276 | SUnit *pop() { |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1277 | if (empty()) return NULL; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1278 | SUnit *V = Queue.top(); |
| 1279 | Queue.pop(); |
| 1280 | return V; |
| 1281 | } |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1282 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1283 | /// remove - This is a really inefficient way to remove a node from a |
| 1284 | /// priority queue. We should roll our own heap to make this better or |
| 1285 | /// something. |
| 1286 | void remove(SUnit *SU) { |
| 1287 | std::vector<SUnit*> Temp; |
| 1288 | |
| 1289 | assert(!Queue.empty() && "Not in queue!"); |
| 1290 | while (Queue.top() != SU) { |
| 1291 | Temp.push_back(Queue.top()); |
| 1292 | Queue.pop(); |
| 1293 | assert(!Queue.empty() && "Not in queue!"); |
| 1294 | } |
| 1295 | |
| 1296 | // Remove the node from the PQ. |
| 1297 | Queue.pop(); |
| 1298 | |
| 1299 | // Add all the other nodes back. |
| 1300 | for (unsigned i = 0, e = Temp.size(); i != e; ++i) |
| 1301 | Queue.push(Temp[i]); |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1302 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1303 | }; |
| 1304 | |
| 1305 | template<class SF> |
Chris Lattner | 996795b | 2006-06-28 23:17:24 +0000 | [diff] [blame] | 1306 | class VISIBILITY_HIDDEN BURegReductionPriorityQueue |
| 1307 | : public RegReductionPriorityQueue<SF> { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1308 | // SUnitMap - SDNode to SUnit mapping (n -> n). |
| 1309 | DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1310 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1311 | // SUnits - The SUnits for the current graph. |
| 1312 | const std::vector<SUnit> *SUnits; |
| 1313 | |
| 1314 | // SethiUllmanNumbers - The SethiUllman number for each node. |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1315 | std::vector<unsigned> SethiUllmanNumbers; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1316 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1317 | const TargetInstrInfo *TII; |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1318 | const TargetRegisterInfo *TRI; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1319 | ScheduleDAGRRList *scheduleDAG; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1320 | public: |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1321 | explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii, |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1322 | const TargetRegisterInfo *tri) |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1323 | : TII(tii), TRI(tri), scheduleDAG(NULL) {} |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1324 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1325 | void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap, |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1326 | std::vector<SUnit> &sunits) { |
| 1327 | SUnitMap = &sumap; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1328 | SUnits = &sunits; |
| 1329 | // Add pseudo dependency edges for two-address nodes. |
Evan Cheng | afed73e | 2006-05-12 01:58:24 +0000 | [diff] [blame] | 1330 | AddPseudoTwoAddrDeps(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1331 | // Calculate node priorities. |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1332 | CalculateSethiUllmanNumbers(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1333 | } |
| 1334 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1335 | void addNode(const SUnit *SU) { |
| 1336 | SethiUllmanNumbers.resize(SUnits->size(), 0); |
| 1337 | CalcNodeSethiUllmanNumber(SU); |
| 1338 | } |
| 1339 | |
| 1340 | void updateNode(const SUnit *SU) { |
| 1341 | SethiUllmanNumbers[SU->NodeNum] = 0; |
| 1342 | CalcNodeSethiUllmanNumber(SU); |
| 1343 | } |
| 1344 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1345 | void releaseState() { |
| 1346 | SUnits = 0; |
| 1347 | SethiUllmanNumbers.clear(); |
| 1348 | } |
| 1349 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1350 | unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1351 | assert(SU->NodeNum < SethiUllmanNumbers.size()); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1352 | unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0; |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1353 | if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU)) |
| 1354 | // CopyFromReg should be close to its def because it restricts |
| 1355 | // allocation choices. But if it is a livein then perhaps we want it |
| 1356 | // closer to its uses so it can be coalesced. |
| 1357 | return 0xffff; |
| 1358 | else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
| 1359 | // CopyToReg should be close to its uses to facilitate coalescing and |
| 1360 | // avoid spilling. |
| 1361 | return 0; |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1362 | else if (Opc == TargetInstrInfo::EXTRACT_SUBREG || |
| 1363 | Opc == TargetInstrInfo::INSERT_SUBREG) |
| 1364 | // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to |
| 1365 | // facilitate coalescing. |
| 1366 | return 0; |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1367 | else if (SU->NumSuccs == 0) |
| 1368 | // If SU does not have a use, i.e. it doesn't produce a value that would |
| 1369 | // be consumed (e.g. store), then it terminates a chain of computation. |
| 1370 | // Give it a large SethiUllman number so it will be scheduled right |
| 1371 | // before its predecessors, so that it doesn't lengthen their live ranges. |
| 1372 | return 0xffff; |
| 1373 | else if (SU->NumPreds == 0) |
| 1374 | // If SU does not have a def, schedule it close to its uses because it |
| 1375 | // does not lengthen any live ranges. |
| 1376 | return 0; |
| 1377 | else |
| 1378 | return SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1379 | } |
| 1380 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1381 | void setScheduleDAG(ScheduleDAGRRList *scheduleDag) { |
| 1382 | scheduleDAG = scheduleDag; |
| 1383 | } |
| 1384 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1385 | private: |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1386 | bool canClobber(const SUnit *SU, const SUnit *Op); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1387 | void AddPseudoTwoAddrDeps(); |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1388 | void CalculateSethiUllmanNumbers(); |
| 1389 | unsigned CalcNodeSethiUllmanNumber(const SUnit *SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1390 | }; |
| 1391 | |
| 1392 | |
| 1393 | template<class SF> |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1394 | class VISIBILITY_HIDDEN TDRegReductionPriorityQueue |
| 1395 | : public RegReductionPriorityQueue<SF> { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1396 | // SUnitMap - SDNode to SUnit mapping (n -> n). |
| 1397 | DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1398 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1399 | // SUnits - The SUnits for the current graph. |
| 1400 | const std::vector<SUnit> *SUnits; |
| 1401 | |
| 1402 | // SethiUllmanNumbers - The SethiUllman number for each node. |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1403 | std::vector<unsigned> SethiUllmanNumbers; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1404 | |
| 1405 | public: |
| 1406 | TDRegReductionPriorityQueue() {} |
| 1407 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1408 | void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap, |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1409 | std::vector<SUnit> &sunits) { |
| 1410 | SUnitMap = &sumap; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1411 | SUnits = &sunits; |
| 1412 | // Calculate node priorities. |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1413 | CalculateSethiUllmanNumbers(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1414 | } |
| 1415 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1416 | void addNode(const SUnit *SU) { |
| 1417 | SethiUllmanNumbers.resize(SUnits->size(), 0); |
| 1418 | CalcNodeSethiUllmanNumber(SU); |
| 1419 | } |
| 1420 | |
| 1421 | void updateNode(const SUnit *SU) { |
| 1422 | SethiUllmanNumbers[SU->NodeNum] = 0; |
| 1423 | CalcNodeSethiUllmanNumber(SU); |
| 1424 | } |
| 1425 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1426 | void releaseState() { |
| 1427 | SUnits = 0; |
| 1428 | SethiUllmanNumbers.clear(); |
| 1429 | } |
| 1430 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1431 | unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1432 | assert(SU->NodeNum < SethiUllmanNumbers.size()); |
| 1433 | return SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1434 | } |
| 1435 | |
| 1436 | private: |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1437 | void CalculateSethiUllmanNumbers(); |
| 1438 | unsigned CalcNodeSethiUllmanNumber(const SUnit *SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1439 | }; |
| 1440 | } |
| 1441 | |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1442 | /// closestSucc - Returns the scheduled cycle of the successor which is |
| 1443 | /// closest to the current cycle. |
Evan Cheng | 2874855 | 2007-03-13 23:25:11 +0000 | [diff] [blame] | 1444 | static unsigned closestSucc(const SUnit *SU) { |
| 1445 | unsigned MaxCycle = 0; |
| 1446 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1447 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1448 | unsigned Cycle = I->Dep->Cycle; |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1449 | // If there are a bunch of CopyToRegs stacked up, they should be considered |
| 1450 | // to be at the same position. |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1451 | if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg) |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1452 | Cycle = closestSucc(I->Dep)+1; |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1453 | if (Cycle > MaxCycle) |
| 1454 | MaxCycle = Cycle; |
| 1455 | } |
Evan Cheng | 2874855 | 2007-03-13 23:25:11 +0000 | [diff] [blame] | 1456 | return MaxCycle; |
| 1457 | } |
| 1458 | |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1459 | /// calcMaxScratches - Returns a cost estimate of the worst-case requirement |
| 1460 | /// for scratch registers. Live-in operands and live-out results don't count |
| 1461 | /// since they are "fixed". |
| 1462 | static unsigned calcMaxScratches(const SUnit *SU) { |
| 1463 | unsigned Scratches = 0; |
| 1464 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1465 | I != E; ++I) { |
| 1466 | if (I->isCtrl) continue; // ignore chain preds |
Evan Cheng | 0e400d4 | 2008-01-09 23:01:55 +0000 | [diff] [blame] | 1467 | if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg) |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1468 | Scratches++; |
| 1469 | } |
| 1470 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1471 | I != E; ++I) { |
| 1472 | if (I->isCtrl) continue; // ignore chain succs |
Evan Cheng | 0e400d4 | 2008-01-09 23:01:55 +0000 | [diff] [blame] | 1473 | if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg) |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1474 | Scratches += 10; |
| 1475 | } |
| 1476 | return Scratches; |
| 1477 | } |
| 1478 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1479 | // Bottom up |
| 1480 | bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const { |
David Greene | 4c1e6f3 | 2007-06-29 03:42:23 +0000 | [diff] [blame] | 1481 | // There used to be a special tie breaker here that looked for |
David Greene | 5b6f755 | 2007-06-29 02:48:09 +0000 | [diff] [blame] | 1482 | // two-address instructions and preferred the instruction with a |
| 1483 | // def&use operand. The special case triggered diagnostics when |
| 1484 | // _GLIBCXX_DEBUG was enabled because it broke the strict weak |
| 1485 | // ordering that priority_queue requires. It didn't help much anyway |
| 1486 | // because AddPseudoTwoAddrDeps already covers many of the cases |
| 1487 | // where it would have applied. In addition, it's counter-intuitive |
| 1488 | // that a tie breaker would be the first thing attempted. There's a |
| 1489 | // "real" tie breaker below that is the operation of last resort. |
| 1490 | // The fact that the "special tie breaker" would trigger when there |
| 1491 | // wasn't otherwise a tie is what broke the strict weak ordering |
| 1492 | // constraint. |
Evan Cheng | 99f2f79 | 2006-05-13 08:22:24 +0000 | [diff] [blame] | 1493 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1494 | unsigned LPriority = SPQ->getNodePriority(left); |
| 1495 | unsigned RPriority = SPQ->getNodePriority(right); |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1496 | if (LPriority != RPriority) |
| 1497 | return LPriority > RPriority; |
| 1498 | |
| 1499 | // Try schedule def + use closer when Sethi-Ullman numbers are the same. |
| 1500 | // e.g. |
| 1501 | // t1 = op t2, c1 |
| 1502 | // t3 = op t4, c2 |
| 1503 | // |
| 1504 | // and the following instructions are both ready. |
| 1505 | // t2 = op c3 |
| 1506 | // t4 = op c4 |
| 1507 | // |
| 1508 | // Then schedule t2 = op first. |
| 1509 | // i.e. |
| 1510 | // t4 = op c4 |
| 1511 | // t2 = op c3 |
| 1512 | // t1 = op t2, c1 |
| 1513 | // t3 = op t4, c2 |
| 1514 | // |
| 1515 | // This creates more short live intervals. |
| 1516 | unsigned LDist = closestSucc(left); |
| 1517 | unsigned RDist = closestSucc(right); |
| 1518 | if (LDist != RDist) |
| 1519 | return LDist < RDist; |
| 1520 | |
| 1521 | // Intuitively, it's good to push down instructions whose results are |
| 1522 | // liveout so their long live ranges won't conflict with other values |
| 1523 | // which are needed inside the BB. Further prioritize liveout instructions |
| 1524 | // by the number of operands which are calculated within the BB. |
| 1525 | unsigned LScratch = calcMaxScratches(left); |
| 1526 | unsigned RScratch = calcMaxScratches(right); |
| 1527 | if (LScratch != RScratch) |
| 1528 | return LScratch > RScratch; |
| 1529 | |
| 1530 | if (left->Height != right->Height) |
| 1531 | return left->Height > right->Height; |
| 1532 | |
| 1533 | if (left->Depth != right->Depth) |
| 1534 | return left->Depth < right->Depth; |
| 1535 | |
| 1536 | if (left->CycleBound != right->CycleBound) |
| 1537 | return left->CycleBound > right->CycleBound; |
| 1538 | |
| 1539 | // FIXME: No strict ordering. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1540 | return false; |
| 1541 | } |
| 1542 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1543 | template<class SF> bool |
| 1544 | BURegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1545 | if (SU->isTwoAddress) { |
| 1546 | unsigned Opc = SU->Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 1547 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1548 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 1549 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1550 | for (unsigned i = 0; i != NumOps; ++i) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1551 | if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1552 | SDNode *DU = SU->Node->getOperand(i).Val; |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 1553 | if ((*SUnitMap).find(DU) != (*SUnitMap).end() && |
| 1554 | Op == (*SUnitMap)[DU][SU->InstanceNo]) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1555 | return true; |
| 1556 | } |
| 1557 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1558 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1559 | return false; |
| 1560 | } |
| 1561 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1562 | |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1563 | /// hasCopyToRegUse - Return true if SU has a value successor that is a |
| 1564 | /// CopyToReg node. |
| 1565 | static bool hasCopyToRegUse(SUnit *SU) { |
| 1566 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1567 | I != E; ++I) { |
| 1568 | if (I->isCtrl) continue; |
| 1569 | SUnit *SuccSU = I->Dep; |
| 1570 | if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg) |
| 1571 | return true; |
| 1572 | } |
| 1573 | return false; |
| 1574 | } |
| 1575 | |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1576 | /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's |
| 1577 | /// physical register def. |
| 1578 | static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU, |
| 1579 | const TargetInstrInfo *TII, |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1580 | const TargetRegisterInfo *TRI) { |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1581 | SDNode *N = SuccSU->Node; |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 1582 | unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs(); |
| 1583 | const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs(); |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1584 | if (!ImpDefs) |
| 1585 | return false; |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 1586 | const unsigned *SUImpDefs = |
| 1587 | TII->get(SU->Node->getTargetOpcode()).getImplicitDefs(); |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1588 | if (!SUImpDefs) |
| 1589 | return false; |
| 1590 | for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { |
| 1591 | MVT::ValueType VT = N->getValueType(i); |
| 1592 | if (VT == MVT::Flag || VT == MVT::Other) |
| 1593 | continue; |
| 1594 | unsigned Reg = ImpDefs[i - NumDefs]; |
| 1595 | for (;*SUImpDefs; ++SUImpDefs) { |
| 1596 | unsigned SUReg = *SUImpDefs; |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1597 | if (TRI->regsOverlap(Reg, SUReg)) |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1598 | return true; |
| 1599 | } |
| 1600 | } |
| 1601 | return false; |
| 1602 | } |
| 1603 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1604 | /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses |
| 1605 | /// it as a def&use operand. Add a pseudo control edge from it to the other |
| 1606 | /// node (if it won't create a cycle) so the two-address one will be scheduled |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1607 | /// first (lower in the schedule). If both nodes are two-address, favor the |
| 1608 | /// one that has a CopyToReg use (more likely to be a loop induction update). |
| 1609 | /// If both are two-address, but one is commutable while the other is not |
| 1610 | /// commutable, favor the one that's not commutable. |
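| | /// Illustrative sketch (operand names are made up): given |
| | ///   a = op1 b, c    // two-address: 'a' is tied to 'b' |
| | ///   d = op2 b, e    // another use of 'b' |
| | /// op1 overwrites 'b', so op2 must read 'b' first. The artificial edge |
| | /// makes op2 a predecessor of op1, which places op1 after op2 in the final |
| | /// order and saves the register allocator a copy of 'b'. |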
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1611 | template<class SF> |
| 1612 | void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1613 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) { |
| 1614 | SUnit *SU = (SUnit *)&((*SUnits)[i]); |
| 1615 | if (!SU->isTwoAddress) |
| 1616 | continue; |
| 1617 | |
| 1618 | SDNode *Node = SU->Node; |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1619 | if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1620 | continue; |
| 1621 | |
| 1622 | unsigned Opc = Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 1623 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1624 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 1625 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1626 | for (unsigned j = 0; j != NumOps; ++j) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1627 | if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1628 | SDNode *DU = SU->Node->getOperand(j).Val; |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 1629 | if ((*SUnitMap).find(DU) == (*SUnitMap).end()) |
| 1630 | continue; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1631 | SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo]; |
Evan Cheng | f24d15f | 2006-11-06 21:33:46 +0000 | [diff] [blame] | 1632 | if (!DUSU) continue; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1633 | for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end(); |
| 1634 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1635 | if (I->isCtrl) continue; |
| 1636 | SUnit *SuccSU = I->Dep; |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1637 | if (SuccSU == SU) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1638 | continue; |
Evan Cheng | 2dbffa4 | 2007-11-06 08:44:59 +0000 | [diff] [blame] | 1639 | // Be conservative. Ignore if nodes aren't at roughly the same |
| 1640 | // depth and height. |
| 1641 | if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1) |
| 1642 | continue; |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1643 | if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode()) |
| 1644 | continue; |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1645 | // Don't constrain nodes with physical register defs if the |
Dan Gohman | cf8827a | 2008-01-29 12:43:50 +0000 | [diff] [blame] | 1646 | // predecessor can clobber them. |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1647 | if (SuccSU->hasPhysRegDefs) { |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1648 | if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI)) |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1649 | continue; |
| 1650 | } |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1651 | // Don't constrain extract_subreg / insert_subreg; these may be |
| 1652 | // coalesced away. We want them close to their uses. |
| 1653 | unsigned SuccOpc = SuccSU->Node->getTargetOpcode(); |
| 1654 | if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG || |
| 1655 | SuccOpc == TargetInstrInfo::INSERT_SUBREG) |
| 1656 | continue; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1657 | if ((!canClobber(SuccSU, DUSU) || |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1658 | (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) || |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1659 | (!SU->isCommutable && SuccSU->isCommutable)) && |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1660 | !scheduleDAG->IsReachable(SuccSU, SU)) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1661 | DOUT << "Adding an edge from SU # " << SU->NodeNum |
| 1662 | << " to SU #" << SuccSU->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1663 | scheduleDAG->AddPred(SU, SuccSU, true, true); |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1664 | } |
| 1665 | } |
| 1666 | } |
| 1667 | } |
| 1668 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1669 | } |
| 1670 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1671 | /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1672 | /// Smaller number is the higher priority. |
| 1673 | template<class SF> |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1674 | unsigned BURegReductionPriorityQueue<SF>:: |
| 1675 | CalcNodeSethiUllmanNumber(const SUnit *SU) { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1676 | unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1677 | if (SethiUllmanNumber != 0) |
| 1678 | return SethiUllmanNumber; |
| 1679 | |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1680 | unsigned Extra = 0; |
| 1681 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1682 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1683 | if (I->isCtrl) continue; // ignore chain preds |
| 1684 | SUnit *PredSU = I->Dep; |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1685 | unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU); |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1686 | if (PredSethiUllman > SethiUllmanNumber) { |
| 1687 | SethiUllmanNumber = PredSethiUllman; |
| 1688 | Extra = 0; |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1689 | } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1690 | ++Extra; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1691 | } |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1692 | |
| 1693 | SethiUllmanNumber += Extra; |
| 1694 | |
| 1695 | if (SethiUllmanNumber == 0) |
| 1696 | SethiUllmanNumber = 1; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1697 | |
| 1698 | return SethiUllmanNumber; |
| 1699 | } |
| 1700 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1701 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
| 1702 | /// scheduling units. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1703 | template<class SF> |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1704 | void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1705 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
| 1706 | |
| 1707 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1708 | CalcNodeSethiUllmanNumber(&(*SUnits)[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1709 | } |
| 1710 | |
| 1711 | static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) { |
| 1712 | unsigned Sum = 0; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1713 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1714 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1715 | SUnit *SuccSU = I->Dep; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1716 | for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(), |
| 1717 | EE = SuccSU->Preds.end(); II != EE; ++II) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1718 | SUnit *PredSU = II->Dep; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1719 | if (!PredSU->isScheduled) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1720 | ++Sum; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1721 | } |
| 1722 | } |
| 1723 | |
| 1724 | return Sum; |
| 1725 | } |
| 1726 | |
Roman Levenstein | 30d0951 | 2008-03-27 09:44:37 +0000 | [diff] [blame^] | 1727 | /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled |
Roman Levenstein | bc67450 | 2008-03-27 09:14:57 +0000 | [diff] [blame] | 1728 | /// predecessors of the successors of the SUnit SU. Stop when the provided |
| 1729 | /// limit is exceeded. |
| 1730 | |
| 1731 | static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU, |
| 1732 | unsigned Limit) { |
| 1733 | unsigned Sum = 0; |
| 1734 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1735 | I != E; ++I) { |
| 1736 | SUnit *SuccSU = I->Dep; |
| 1737 | for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(), |
| 1738 | EE = SuccSU->Preds.end(); II != EE; ++II) { |
| 1739 | SUnit *PredSU = II->Dep; |
| 1740 | if (!PredSU->isScheduled) { |
| 1741 | ++Sum; |
| 1742 | if (Sum > Limit) |
| 1743 | return Sum; |
| 1744 | } |
| 1745 | } |
| 1746 | } |
| 1747 | return Sum; |
| 1748 | } |
| 1749 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1750 | |
| 1751 | // Top down |
| 1752 | bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const { |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1753 | unsigned LPriority = SPQ->getNodePriority(left); |
| 1754 | unsigned RPriority = SPQ->getNodePriority(right); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1755 | bool LIsTarget = left->Node && left->Node->isTargetOpcode(); |
| 1756 | bool RIsTarget = right->Node && right->Node->isTargetOpcode(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1757 | bool LIsFloater = LIsTarget && left->NumPreds == 0; |
| 1758 | bool RIsFloater = RIsTarget && right->NumPreds == 0; |
Roman Levenstein | bc67450 | 2008-03-27 09:14:57 +0000 | [diff] [blame] | 1759 | unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0; |
| 1760 | unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1761 | |
| 1762 | if (left->NumSuccs == 0 && right->NumSuccs != 0) |
| 1763 | return false; |
| 1764 | else if (left->NumSuccs != 0 && right->NumSuccs == 0) |
| 1765 | return true; |
| 1766 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1767 | if (LIsFloater) |
| 1768 | LBonus -= 2; |
| 1769 | if (RIsFloater) |
| 1770 | RBonus -= 2; |
| 1771 | if (left->NumSuccs == 1) |
| 1772 | LBonus += 2; |
| 1773 | if (right->NumSuccs == 1) |
| 1774 | RBonus += 2; |
| 1775 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1776 | if (LPriority+LBonus != RPriority+RBonus) |
| 1777 | return LPriority+LBonus < RPriority+RBonus; |
Anton Korobeynikov | 035eaac | 2008-02-20 11:10:28 +0000 | [diff] [blame] | 1778 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1779 | if (left->Depth != right->Depth) |
| 1780 | return left->Depth < right->Depth; |
| 1781 | |
| 1782 | if (left->NumSuccsLeft != right->NumSuccsLeft) |
| 1783 | return left->NumSuccsLeft > right->NumSuccsLeft; |
| 1784 | |
| 1785 | if (left->CycleBound != right->CycleBound) |
| 1786 | return left->CycleBound > right->CycleBound; |
| 1787 | |
| 1788 | // FIXME: No strict ordering. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1789 | return false; |
| 1790 | } |
| 1791 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1792 | /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1793 | /// Smaller number is the higher priority. |
| 1794 | template<class SF> |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1795 | unsigned TDRegReductionPriorityQueue<SF>:: |
| 1796 | CalcNodeSethiUllmanNumber(const SUnit *SU) { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1797 | unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1798 | if (SethiUllmanNumber != 0) |
| 1799 | return SethiUllmanNumber; |
| 1800 | |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1801 | unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1802 | if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1803 | SethiUllmanNumber = 0xffff; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1804 | else if (SU->NumSuccsLeft == 0) |
| 1805 | // If SU does not have a use, i.e. it doesn't produce a value that would |
| 1806 | // be consumed (e.g. store), then it terminates a chain of computation. |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1807 | // Give it a small Sethi-Ullman number so it will be scheduled right before
| 1808 | // its predecessors, so that it does not lengthen their live ranges.
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1809 | SethiUllmanNumber = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1810 | else if (SU->NumPredsLeft == 0 && |
| 1811 | (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU))) |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1812 | SethiUllmanNumber = 0xffff; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1813 | else { |
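| | // Otherwise the number is the largest Sethi-Ullman number among the
| | // non-chain predecessors; Extra counts the predecessors that tie the
| | // running maximum instead of raising it.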
| 1814 | int Extra = 0; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1815 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1816 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1817 | if (I->isCtrl) continue; // ignore chain preds |
| 1818 | SUnit *PredSU = I->Dep; |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1819 | unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1820 | if (PredSethiUllman > SethiUllmanNumber) { |
| 1821 | SethiUllmanNumber = PredSethiUllman; |
| 1822 | Extra = 0; |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1823 | } else if (PredSethiUllman == SethiUllmanNumber)
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1824 | ++Extra; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1825 | } |
| 1826 | |
| 1827 | SethiUllmanNumber += Extra; |
| 1828 | } |
| 1829 | |
| 1830 | return SethiUllmanNumber; |
| 1831 | } |
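| | 
| | // For illustration: two non-chain predecessors both numbered 3 give the node
| | // 4, while predecessors numbered 5 and 3 give it 5. The special cases before
| | // the loop pin nodes with no uses to 0 and give TokenFactor, CopyToReg and
| | // most nodes with no remaining predecessors the sentinel 0xffff.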
| 1832 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1833 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
| 1834 | /// scheduling units. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1835 | template<class SF> |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1836 | void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1837 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
| 1838 | |
| 1839 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1840 | CalcNodeSethiUllmanNumber(&(*SUnits)[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1841 | } |
| 1842 | |
| 1843 | //===----------------------------------------------------------------------===// |
| 1844 | // Public Constructor Functions |
| 1845 | //===----------------------------------------------------------------------===// |
| 1846 | |
Jim Laskey | 03593f7 | 2006-08-01 18:29:48 +0000 | [diff] [blame] | 1847 | llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, |
| 1848 | SelectionDAG *DAG, |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1849 | MachineBasicBlock *BB) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1850 | const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo(); |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1851 | const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo(); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1852 | |
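| | // The queue and the scheduler refer to each other, so they are wired up in
| | // two steps: the queue is built first, handed to the scheduler's
| | // constructor, and then given a pointer back to the scheduler it serves.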
| 1853 | BURegReductionPriorityQueue<bu_ls_rr_sort> *priorityQueue = |
| 1854 | new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII, TRI); |
| 1855 | |
| 1856 | ScheduleDAGRRList *scheduleDAG =
| 1857 | new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, priorityQueue); |
| 1858 | priorityQueue->setScheduleDAG(scheduleDAG); |
| 1859 | return scheduleDAG; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1860 | } |
| 1861 | |
Jim Laskey | 03593f7 | 2006-08-01 18:29:48 +0000 | [diff] [blame] | 1862 | llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, |
| 1863 | SelectionDAG *DAG, |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1864 | MachineBasicBlock *BB) { |
Jim Laskey | 95eda5b | 2006-08-01 14:21:23 +0000 | [diff] [blame] | 1865 | return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1866 | new TDRegReductionPriorityQueue<td_ls_rr_sort>()); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1867 | } |
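| | 
| | // Usage note (describing the surrounding driver rather than anything this
| | // file defines): these factories are not normally called by hand; the
| | // scheduler registry looks up the requested creator and invokes it with the
| | // current SelectionDAG and basic block, e.g. via llc's -pre-RA-sched option:
| | //   llc -pre-RA-sched=list-burr foo.bc
| | //   llc -pre-RA-sched=list-tdrr foo.bc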
| 1868 | |