//===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
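
// A rough sketch of the driver loop described above, in comment form
// (illustrative only; the real loops are ListScheduleBottomUp and
// ListScheduleTopDown, defined later in this file, which additionally handle
// physical register interference and backtracking):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();   // highest-priority ready node
//     ScheduleNodeBottomUp(SU, CurCycle);  // emit it and release its preds
//   }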

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
#include "llvm/Support/CommandLine.h"
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumCCCopies,   "Number of cross class copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "  Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "  Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
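
// These registrations make the two schedulers selectable by name, e.g. with
// "llc -pre-RA-sched=list-burr" (illustrative; the exact option spelling may
// differ between LLVM versions).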

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

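  /// Fast - True if the scheduler should take the fast path, skipping the
  /// depth/height computation and the CommuteNodesToReducePressure post-pass
  /// (see Schedule()).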
  bool Fast;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their definitions
  /// that are "live".  These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup, bool f,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
      AvailableQueue(availqueue) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(SUnit *SU, SUnit *TargetSU);

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);

  /// AddPred - This adds the specified node X as a predecessor of
  /// the current node Y if it is not already one.
  /// This returns true if this is a new predecessor.
  /// Updates the topological ordering if required.
  bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
               unsigned PhyReg = 0, int Cost = 1);

  /// RemovePred - This removes the specified node N from the predecessors of
  /// the current node M. Updates the topological ordering if required.
  bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);

private:
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*,
                                  SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();


  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// Functions for preserving the topological ordering
  /// even after dynamic insertions of new edges.
  /// This allows a very fast implementation of IsReachable.

  /// InitDAGTopologicalSorting - create the initial topological
  /// ordering from the DAG to be scheduled.
  void InitDAGTopologicalSorting();

  /// DFS - make a DFS traversal and mark all nodes affected by the
  /// edge insertion. These nodes will later get new topological indexes
  /// by means of the Shift method.
  void DFS(SUnit *SU, int UpperBound, bool& HasLoop);

  /// Shift - reassign topological indexes for the nodes in the DAG
  /// to preserve the topological ordering.
  void Shift(BitVector& Visited, int LowerBound, int UpperBound);

  /// Allocate - assign the topological index to the node n.
  void Allocate(int n, int index);

  /// Index2Node - Maps topological index to the node number.
  std::vector<int> Index2Node;
  /// Node2Index - Maps the node number to its topological index.
  std::vector<int> Node2Index;
  /// Visited - a set of nodes visited during a DFS traversal.
  BitVector Visited;
};
}  // end anonymous namespace
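
// Illustrative construction of this scheduler (a sketch only; the usual entry
// points are the factory functions registered above, e.g.
// createBURRListDAGScheduler, which presumably pick an appropriate queue):
//
//   SchedulingPriorityQueue *PQ = /* some register-reduction queue */;
//   ScheduleDAGRRList Scheduler(DAG, BB, TM, /*isbottomup=*/true,
//                               /*f=*/false, PQ);
//   Scheduler.Schedule();  // builds SUnits, schedules, and emits the order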


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  if (!Fast) {
    CalculateDepths();
    CalculateHeights();
  }
  InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  if (!Fast)
    CommuteNodesToReducePressure();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";

  // Emit in scheduled order
  EmitSchedule();
}

/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to a machine instruction.
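///
/// For example (illustrative): for a two-address commutable node such as
/// "t3 = ADD t1, t2", if t1 is still needed by an already-scheduled node
/// below but t2 is not, commuting the operands lets the two-address pass
/// clobber t2 instead of inserting a copy to preserve t1.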
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size(); i != 0; ) {
    --i;
    SUnit *SU = Sequence[i];
    if (!SU || !SU->Node) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->Node->getTargetOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      unsigned NumRes = TID.getNumDefs();
      unsigned NumOps = TID.getNumOperands() - NumRes;
      for (unsigned j = 0; j != NumOps; ++j) {
        if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->Node->getOperand(j).Val;
        SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU. Try to commute SU if no other source operands
          // are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->Node->getOperand(k).Val;
              OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->Node);
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep->OrigNode);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (PredSU->NumSuccsLeft == 0) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the NumSuccsLeft count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  unsigned CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    CycleBound = std::max(CycleBound,
                          I->Dep->Cycle + PredSU->Latency);
  }

  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  PredSU->CycleBound = CycleBound;
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// state and that of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}

/// IsReachable - Checks if SU is reachable from TargetSU.
bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

/// Allocate - assign the topological index to the node n.
inline void ScheduleDAGRRList::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
///
/// The idea of the algorithm is taken from
/// "Online algorithms for managing the topological order of
/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
/// This is the MNR algorithm, which was first introduced by
/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
/// "Maintaining a topological order under edge insertions".
///
/// Short description of the algorithm:
///
/// Topological ordering, ord, of a DAG maps each node to a topological
/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
///
/// This means that if there is a path from the node X to the node Z,
/// then ord(X) < ord(Z).
///
/// This property can be used to check for reachability of nodes:
/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
///
/// The algorithm first computes a topological ordering for the DAG by
/// initializing the Index2Node and Node2Index arrays and then tries to keep
/// the ordering up-to-date after edge insertions by reordering the DAG.
///
/// On insertion of the edge X->Y, the algorithm first marks by calling DFS
/// the nodes reachable from Y, and then shifts them using Shift to lie
/// immediately after X in Index2Node.
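///
/// For example (an illustrative scenario, not taken from the paper): suppose
/// four nodes are ordered A(0), B(1), C(2), D(3) and the only existing edge
/// is B->C.  Inserting the edge D->B violates the order, since
/// ord(D)=3 > ord(B)=1.  DFS(B, 3) marks B and C, and Shift then renumbers
/// the region [1,3] to give A(0), D(1), B(2), C(3), so every edge X->Y again
/// satisfies ord(X) < ord(Y).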
void ScheduleDAGRRList::InitDAGTopologicalSorting() {
  unsigned DAGSize = SUnits.size();
  std::vector<unsigned> InDegree(DAGSize);
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);
  std::vector<SUnit*> TopOrder;
  TopOrder.reserve(DAGSize);

  // Initialize the data structures.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    InDegree[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    TopOrder.push_back(SU);
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      SUnit *SU = I->Dep;
      if (!--InDegree[SU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(SU);
    }
  }

  // Second pass, assign the actual topological order as node ids.
  int Id = 0;

  Index2Node.clear();
  Node2Index.clear();
  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);
  Visited.resize(DAGSize);

  for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
       TE = TopOrder.rend(); TI != TE; ++TI) {
    Allocate((*TI)->NodeNum, Id);
    Id++;
  }

#ifndef NDEBUG
  // Check correctness of the ordering
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

/// AddPred - adds an edge from SUnit X to SUnit Y.
/// Updates the topological ordering if required.
bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
                                unsigned PhyReg, int Cost) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(X) < Ord(Y) ?
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
  // Now really insert the edge.
  return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
}

/// RemovePred - This removes the specified node N from the predecessors of
/// the current node M. Updates the topological ordering if required.
bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
                                   bool isCtrl, bool isSpecial) {
  // InitDAGTopologicalSorting();
  return M->removePred(N, isCtrl, isSpecial);
}

/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
  std::vector<SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  while (!WorkList.empty()) {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      int s = SU->Succs[I].Dep->NodeNum;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SU->Succs[I].Dep);
      }
    }
  }
}

/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
                              int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned j = 0; j < L.size(); ++j) {
    Allocate(L[j], i - shift);
    i = i + 1;
  }
}


/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
  if (IsReachable(TargetSU, SU))
    return true;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
      return true;
  return false;
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.  Nodes scheduled since then
/// are unscheduled and returned to the available queue, and CurCycle is
/// updated to the cycle that was backtracked to.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }


  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  ++NumBacktracks;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
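/// The clone takes over the already-scheduled successors, while the original
/// node keeps the successors that are still unscheduled (presumably so that a
/// value can be made available in two places, e.g. when a physical register
/// interference forces duplication).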
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->FlaggedNodes.size())
    return NULL;

  SDNode *N = SU->Node;
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDOperand &Op = N->getOperand(i);
    MVT VT = Op.Val->getValueType(Op.ResNo);
    if (VT == MVT::Flag)
      return NULL;
  }

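  // The node produces a chain result, so it may have a load folded into it.
  // Try to unfold the load into a separate SUnit so that only the computation
  // itself has to be duplicated.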
  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->Node->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
    DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
                                  SDOperand(LoadNode, 1));

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    // FIXME: Calculate height / depth and propagate the changes?
    NewSU->Depth = SU->Depth;
    NewSU->Height = SU->Height;
    ComputeLatency(NewSU);

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      LoadSU->Depth = SU->Depth;
      LoadSU->Height = SU->Height;
      ComputeLatency(LoadSU);
    }

    SUnit *ChainPred = NULL;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainPred = I->Dep;
      else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
        LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
      else
        NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                  I->isCtrl, I->isSpecial));
      else
        NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                 I->isCtrl, I->isSpecial));
    }

    if (ChainPred) {
      RemovePred(SU, ChainPred, true, false);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred, true, false);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      SDep *Pred = &LoadPreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      if (isNewLoad) {
        AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
                Pred->Reg, Pred->Cost);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      SDep *Pred = &NodePreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
              Pred->Reg, Pred->Cost);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep *Succ = &NodeSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
              Succ->Reg, Succ->Cost);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep *Succ = &ChainSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      if (isNewLoad) {
        AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
                Succ->Reg, Succ->Cost);
      }
    }
    if (isNewLoad) {
      AddPred(NewSU, LoadSU, false, false);
    }

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
/// and move all scheduled successors of the given SUnit to the last copy.
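/// Presumably this targets condition-code-like values (hence the "CC" in the
/// name): the value is copied from SrcRC into DestRC and back again, so that
/// nodes scheduled in between may clobber the original physical register.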
void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                               const TargetRegisterClass *DestRC,
                                               const TargetRegisterClass *SrcRC,
                                               SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AddPred(CopyFromSU, SU, false, false, Reg, -1);
  AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumCCCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
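///
/// For example (illustrative): if N has two explicit results and its implicit
/// def list is { EFLAGS, EAX }, then getPhysicalRegisterVT(N, EAX, TII) skips
/// the two explicit defs plus EFLAGS and returns N->getValueType(3).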
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
| 890 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 891 | /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay |
| 892 | /// scheduling of the given node to satisfy live physical register dependencies. |
| 893 | /// If the specified node is the last one that's available to schedule, do
| 894 | /// whatever is necessary (i.e. backtracking or cloning) to make it possible. |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 895 | bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, |
| 896 | SmallVector<unsigned, 4> &LRegs){ |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 897 | if (LiveRegs.empty()) |
| 898 | return false; |
| 899 | |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 900 | SmallSet<unsigned, 4> RegAdded; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 901 | // If this node would clobber any "live" register, then it's not ready. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 902 | for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 903 | I != E; ++I) { |
| 904 | if (I->Cost < 0) { |
| 905 | unsigned Reg = I->Reg; |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 906 | if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) { |
| 907 | if (RegAdded.insert(Reg)) |
| 908 | LRegs.push_back(Reg); |
| 909 | } |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 910 | for (const unsigned *Alias = TRI->getAliasSet(Reg); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 911 | *Alias; ++Alias) |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 912 | if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) { |
| 913 | if (RegAdded.insert(*Alias)) |
| 914 | LRegs.push_back(*Alias); |
| 915 | } |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 916 | } |
| 917 | } |
| 918 | |
| 919 | for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) { |
| 920 | SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1]; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 921 | if (!Node || !Node->isTargetOpcode()) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 922 | continue; |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 923 | const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode()); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 924 | if (!TID.ImplicitDefs) |
| 925 | continue; |
| 926 | for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) { |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 927 | if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) { |
| 928 | if (RegAdded.insert(*Reg)) |
| 929 | LRegs.push_back(*Reg); |
| 930 | } |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 931 | for (const unsigned *Alias = TRI->getAliasSet(*Reg); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 932 | *Alias; ++Alias) |
Evan Cheng | e6f9225 | 2007-09-27 18:46:06 +0000 | [diff] [blame] | 933 | if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) { |
| 934 | if (RegAdded.insert(*Alias)) |
| 935 | LRegs.push_back(*Alias); |
| 936 | } |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 937 | } |
| 938 | } |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 939 | return !LRegs.empty(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 940 | } |
| 941 | |
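// A minimal standalone sketch of the clobber test used above, with plain STL
// containers standing in for the scheduler's LiveRegs/LiveRegDefs state. The
// Unit type and the helper name are illustrative assumptions, not LLVM APIs;
// the real code additionally de-duplicates registers and checks alias sets.
#include <map>
#include <set>
#include <vector>

namespace sketch {
struct Unit; // stands in for an SUnit

// Returns the live physical registers that the candidate would clobber if it
// were scheduled now: registers it defines that are currently live and whose
// live definition comes from some other unit. An empty result means it is
// safe to schedule the candidate in this cycle.
std::vector<unsigned>
interferingRegs(const Unit *Candidate,
                const std::vector<unsigned> &RegsDefinedByCandidate,
                const std::set<unsigned> &LiveRegs,
                const std::map<unsigned, const Unit*> &LiveRegDefs) {
  std::vector<unsigned> LRegs;
  for (size_t i = 0, e = RegsDefinedByCandidate.size(); i != e; ++i) {
    unsigned Reg = RegsDefinedByCandidate[i];
    std::map<unsigned, const Unit*>::const_iterator It = LiveRegDefs.find(Reg);
    if (LiveRegs.count(Reg) && It != LiveRegDefs.end() &&
        It->second != Candidate)
      LRegs.push_back(Reg);
  }
  return LRegs;
}
} // namespace sketch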
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 942 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 943 | /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up |
| 944 | /// schedulers. |
| 945 | void ScheduleDAGRRList::ListScheduleBottomUp() { |
| 946 | unsigned CurCycle = 0; |
| 947 | // Add root to Available queue. |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 948 | if (!SUnits.empty()) { |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 949 | SUnit *RootSU = &SUnits[DAG.getRoot().Val->getNodeId()]; |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 950 | assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!"); |
| 951 | RootSU->isAvailable = true; |
| 952 | AvailableQueue->push(RootSU); |
| 953 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 954 | |
| 955 | // While Available queue is not empty, grab the node with the highest |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 956 | // priority. If it is not ready put it back. Schedule the node. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 957 | SmallVector<SUnit*, 4> NotReady; |
Dan Gohman | fa63cc4 | 2008-06-23 21:15:00 +0000 | [diff] [blame] | 958 | DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap; |
Dan Gohman | e6e1348 | 2008-06-21 15:52:51 +0000 | [diff] [blame] | 959 | Sequence.reserve(SUnits.size()); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 960 | while (!AvailableQueue->empty()) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 961 | bool Delayed = false; |
Dan Gohman | fa63cc4 | 2008-06-23 21:15:00 +0000 | [diff] [blame] | 962 | LRegsMap.clear(); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 963 | SUnit *CurSU = AvailableQueue->pop(); |
| 964 | while (CurSU) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 965 | if (CurSU->CycleBound <= CurCycle) { |
| 966 | SmallVector<unsigned, 4> LRegs; |
| 967 | if (!DelayForLiveRegsBottomUp(CurSU, LRegs)) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 968 | break; |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 969 | Delayed = true; |
| 970 | LRegsMap.insert(std::make_pair(CurSU, LRegs)); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 971 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 972 | |
| 973 | CurSU->isPending = true; // This SU is not in AvailableQueue right now. |
| 974 | NotReady.push_back(CurSU); |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 975 | CurSU = AvailableQueue->pop(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 976 | } |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 977 | |
| 978 | // All candidates are delayed due to live physical reg dependencies. |
| 979 | // Try backtracking, code duplication, or inserting cross class copies |
| 980 | // to resolve it. |
| 981 | if (Delayed && !CurSU) { |
| 982 | for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { |
| 983 | SUnit *TrySU = NotReady[i]; |
| 984 | SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU]; |
| 985 | |
| 986 | // Try unscheduling up to the point where it's safe to schedule |
| 987 | // this node. |
| 988 | unsigned LiveCycle = CurCycle; |
| 989 | for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) { |
| 990 | unsigned Reg = LRegs[j]; |
| 991 | unsigned LCycle = LiveRegCycles[Reg]; |
| 992 | LiveCycle = std::min(LiveCycle, LCycle); |
| 993 | } |
| 994 | SUnit *OldSU = Sequence[LiveCycle]; |
| 995 | if (!WillCreateCycle(TrySU, OldSU)) { |
| 996 | BacktrackBottomUp(TrySU, LiveCycle, CurCycle); |
| 997 | // Force the current node to be scheduled before the node that |
| 998 | // requires the physical reg dep. |
| 999 | if (OldSU->isAvailable) { |
| 1000 | OldSU->isAvailable = false; |
| 1001 | AvailableQueue->remove(OldSU); |
| 1002 | } |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1003 | AddPred(TrySU, OldSU, true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1004 | // If one or more successors have been unscheduled, then the current
| 1005 | // node is no longer available. Schedule a successor that's now
| 1006 | // available instead. |
| 1007 | if (!TrySU->isAvailable) |
| 1008 | CurSU = AvailableQueue->pop(); |
| 1009 | else { |
| 1010 | CurSU = TrySU; |
| 1011 | TrySU->isPending = false; |
| 1012 | NotReady.erase(NotReady.begin()+i); |
| 1013 | } |
| 1014 | break; |
| 1015 | } |
| 1016 | } |
| 1017 | |
| 1018 | if (!CurSU) { |
Dan Gohman | fd227e9 | 2008-03-25 17:10:29 +0000 | [diff] [blame] | 1019 | // Can't backtrack. Try duplicating the nodes that produce these
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1020 | // "expensive to copy" values to break the dependency. In case even |
| 1021 | // that doesn't work, insert cross class copies. |
| 1022 | SUnit *TrySU = NotReady[0]; |
| 1023 | SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU]; |
| 1024 | assert(LRegs.size() == 1 && "Can't handle this yet!"); |
| 1025 | unsigned Reg = LRegs[0]; |
| 1026 | SUnit *LRDef = LiveRegDefs[Reg]; |
Evan Cheng | 79e9713 | 2007-10-05 01:39:18 +0000 | [diff] [blame] | 1027 | SUnit *NewDef = CopyAndMoveSuccessors(LRDef); |
| 1028 | if (!NewDef) { |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1029 | // Issue expensive cross register class copies. |
Duncan Sands | 13237ac | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 1030 | MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1031 | const TargetRegisterClass *RC = |
Evan Cheng | e88a625 | 2008-03-11 07:19:34 +0000 | [diff] [blame] | 1032 | TRI->getPhysicalRegisterRegClass(Reg, VT); |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1033 | const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1034 | if (!DestRC) { |
| 1035 | assert(false && "Don't know how to copy this physical register!"); |
| 1036 | abort(); |
| 1037 | } |
| 1038 | SmallVector<SUnit*, 2> Copies; |
| 1039 | InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); |
| 1040 | DOUT << "Adding an edge from SU # " << TrySU->NodeNum |
| 1041 | << " to SU #" << Copies.front()->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1042 | AddPred(TrySU, Copies.front(), true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1043 | NewDef = Copies.back(); |
| 1044 | } |
| 1045 | |
| 1046 | DOUT << "Adding an edge from SU # " << NewDef->NodeNum |
| 1047 | << " to SU #" << TrySU->NodeNum << "\n"; |
| 1048 | LiveRegDefs[Reg] = NewDef; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1049 | AddPred(NewDef, TrySU, true, true); |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1050 | TrySU->isAvailable = false; |
| 1051 | CurSU = NewDef; |
| 1052 | } |
| 1053 | |
| 1054 | if (!CurSU) { |
| 1055 | assert(false && "Unable to resolve live physical register dependencies!"); |
| 1056 | abort(); |
| 1057 | } |
| 1058 | } |
| 1059 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1060 | // Add the nodes that aren't ready back onto the available list. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1061 | for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { |
| 1062 | NotReady[i]->isPending = false; |
Evan Cheng | 1ec79b4 | 2007-09-27 07:09:03 +0000 | [diff] [blame] | 1063 | // May no longer be available due to backtracking. |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1064 | if (NotReady[i]->isAvailable) |
| 1065 | AvailableQueue->push(NotReady[i]); |
| 1066 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1067 | NotReady.clear(); |
| 1068 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1069 | if (!CurSU) |
| 1070 | Sequence.push_back(0); |
| 1071 | else { |
| 1072 | ScheduleNodeBottomUp(CurSU, CurCycle); |
| 1073 | Sequence.push_back(CurSU); |
| 1074 | } |
| 1075 | ++CurCycle; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1076 | } |
| 1077 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1078 | // Reverse the order if it is bottom up. |
| 1079 | std::reverse(Sequence.begin(), Sequence.end()); |
| 1080 | |
| 1081 | |
| 1082 | #ifndef NDEBUG |
| 1083 | // Verify that all SUnits were scheduled. |
| 1084 | bool AnyNotSched = false; |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1085 | unsigned DeadNodes = 0; |
Dan Gohman | 82b6673 | 2008-04-15 22:40:14 +0000 | [diff] [blame] | 1086 | unsigned Noops = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1087 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1088 | if (!SUnits[i].isScheduled) { |
| 1089 | if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) { |
| 1090 | ++DeadNodes; |
| 1091 | continue; |
| 1092 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1093 | if (!AnyNotSched) |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1094 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1095 | SUnits[i].dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1096 | cerr << "has not been scheduled!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1097 | AnyNotSched = true; |
| 1098 | } |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1099 | if (SUnits[i].NumSuccsLeft != 0) { |
| 1100 | if (!AnyNotSched) |
| 1101 | cerr << "*** List scheduling failed! ***\n"; |
| 1102 | SUnits[i].dump(&DAG); |
| 1103 | cerr << "has successors left!\n"; |
| 1104 | AnyNotSched = true; |
| 1105 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1106 | } |
Dan Gohman | 82b6673 | 2008-04-15 22:40:14 +0000 | [diff] [blame] | 1107 | for (unsigned i = 0, e = Sequence.size(); i != e; ++i) |
| 1108 | if (!Sequence[i]) |
| 1109 | ++Noops; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1110 | assert(!AnyNotSched); |
Dan Gohman | 82b6673 | 2008-04-15 22:40:14 +0000 | [diff] [blame] | 1111 | assert(Sequence.size() + DeadNodes - Noops == SUnits.size() && |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1112 | "The number of nodes scheduled doesn't match the expected number!"); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1113 | #endif |
| 1114 | } |
| 1115 | |
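// A minimal standalone sketch of the bottom-up list-scheduling loop above,
// stripped of the noop slots, cycle bounds, and live-physical-register
// backtracking/copy handling. The Node struct, the priority field, and the
// container choices are illustrative assumptions, not the scheduler's real
// data structures; NumSuccsLeft is assumed to be pre-initialized to each
// node's successor count.
#include <algorithm>
#include <queue>
#include <vector>

namespace sketch {
struct Node {
  int Priority;                     // larger value pops earlier here
  unsigned NumSuccsLeft;            // successors not yet scheduled
  std::vector<Node*> Preds;         // predecessors to release
};

struct ByPriority {
  bool operator()(const Node *L, const Node *R) const {
    return L->Priority < R->Priority;
  }
};

// Bottom-up list scheduling: start from nodes with no successors, repeatedly
// pop the best available node, and release a predecessor once all of its
// successors have been scheduled. The bottom-up order is reversed at the end.
std::vector<Node*> listScheduleBottomUp(const std::vector<Node*> &Roots) {
  std::priority_queue<Node*, std::vector<Node*>, ByPriority> Available;
  for (size_t i = 0, e = Roots.size(); i != e; ++i)
    Available.push(Roots[i]);
  std::vector<Node*> Sequence;
  while (!Available.empty()) {
    Node *N = Available.top();
    Available.pop();
    Sequence.push_back(N);
    for (size_t i = 0, e = N->Preds.size(); i != e; ++i)
      if (--N->Preds[i]->NumSuccsLeft == 0)
        Available.push(N->Preds[i]);
  }
  std::reverse(Sequence.begin(), Sequence.end());
  return Sequence;
}
} // namespace sketch
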
| 1116 | //===----------------------------------------------------------------------===// |
| 1117 | // Top-Down Scheduling |
| 1118 | //===----------------------------------------------------------------------===// |
| 1119 | |
| 1120 | /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1121 | /// the AvailableQueue if the count reaches zero. Also update its cycle bound. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1122 | void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain, |
| 1123 | unsigned CurCycle) { |
| 1124 | // FIXME: the distance between two nodes is not always == the predecessor's |
| 1125 | // latency. For example, the reader can very well read the register written |
| 1126 | // by the predecessor later than the issue cycle. It also depends on the |
| 1127 | // interrupt model (drain vs. freeze). |
| 1128 | SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency); |
| 1129 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1130 | --SuccSU->NumPredsLeft; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1131 | |
| 1132 | #ifndef NDEBUG |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1133 | if (SuccSU->NumPredsLeft < 0) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1134 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1135 | SuccSU->dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1136 | cerr << " has been released too many times!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1137 | assert(0); |
| 1138 | } |
| 1139 | #endif |
| 1140 | |
Evan Cheng | 038dcc5 | 2007-09-28 19:24:24 +0000 | [diff] [blame] | 1141 | if (SuccSU->NumPredsLeft == 0) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1142 | SuccSU->isAvailable = true; |
| 1143 | AvailableQueue->push(SuccSU); |
| 1144 | } |
| 1145 | } |
| 1146 | |
| 1147 | |
| 1148 | /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending |
| 1149 | /// count of its successors. If a successor pending count is zero, add it to |
| 1150 | /// the Available queue. |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1151 | void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) { |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1152 | DOUT << "*** Scheduling [" << CurCycle << "]: "; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1153 | DEBUG(SU->dump(&DAG)); |
| 1154 | SU->Cycle = CurCycle; |
| 1155 | |
| 1156 | AvailableQueue->ScheduledNode(SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1157 | |
| 1158 | // Top down: release successors |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1159 | for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1160 | I != E; ++I) |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1161 | ReleaseSucc(I->Dep, I->isCtrl, CurCycle); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1162 | SU->isScheduled = true; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1163 | } |
| 1164 | |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1165 | /// ListScheduleTopDown - The main loop of list scheduling for top-down |
| 1166 | /// schedulers. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1167 | void ScheduleDAGRRList::ListScheduleTopDown() { |
| 1168 | unsigned CurCycle = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1169 | |
| 1170 | // All leaves to Available queue. |
| 1171 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
| 1172 | // It is available if it has no predecessors. |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1173 | if (SUnits[i].Preds.empty()) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1174 | AvailableQueue->push(&SUnits[i]); |
| 1175 | SUnits[i].isAvailable = true; |
| 1176 | } |
| 1177 | } |
| 1178 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1179 | // While Available queue is not empty, grab the node with the highest |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1180 | // priority. If it is not ready put it back. Schedule the node. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1181 | std::vector<SUnit*> NotReady; |
Dan Gohman | e6e1348 | 2008-06-21 15:52:51 +0000 | [diff] [blame] | 1182 | Sequence.reserve(SUnits.size()); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1183 | while (!AvailableQueue->empty()) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1184 | SUnit *CurSU = AvailableQueue->pop(); |
| 1185 | while (CurSU && CurSU->CycleBound > CurCycle) { |
| 1186 | NotReady.push_back(CurSU); |
| 1187 | CurSU = AvailableQueue->pop(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1188 | } |
| 1189 | |
| 1190 | // Add the nodes that aren't ready back onto the available list. |
| 1191 | AvailableQueue->push_all(NotReady); |
| 1192 | NotReady.clear(); |
| 1193 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1194 | if (!CurSU) |
| 1195 | Sequence.push_back(0); |
| 1196 | else { |
| 1197 | ScheduleNodeTopDown(CurSU, CurCycle); |
| 1198 | Sequence.push_back(CurSU); |
| 1199 | } |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1200 | ++CurCycle; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1201 | } |
| 1202 | |
| 1203 | |
| 1204 | #ifndef NDEBUG |
| 1205 | // Verify that all SUnits were scheduled. |
| 1206 | bool AnyNotSched = false; |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1207 | unsigned DeadNodes = 0; |
Dan Gohman | 82b6673 | 2008-04-15 22:40:14 +0000 | [diff] [blame] | 1208 | unsigned Noops = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1209 | for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { |
| 1210 | if (!SUnits[i].isScheduled) { |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1211 | if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) { |
| 1212 | ++DeadNodes; |
| 1213 | continue; |
| 1214 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1215 | if (!AnyNotSched) |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1216 | cerr << "*** List scheduling failed! ***\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1217 | SUnits[i].dump(&DAG); |
Bill Wendling | 22e978a | 2006-12-07 20:04:42 +0000 | [diff] [blame] | 1218 | cerr << "has not been scheduled!\n"; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1219 | AnyNotSched = true; |
| 1220 | } |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1221 | if (SUnits[i].NumPredsLeft != 0) { |
| 1222 | if (!AnyNotSched) |
| 1223 | cerr << "*** List scheduling failed! ***\n"; |
| 1224 | SUnits[i].dump(&DAG); |
| 1225 | cerr << "has predecessors left!\n"; |
| 1226 | AnyNotSched = true; |
| 1227 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1228 | } |
Dan Gohman | 82b6673 | 2008-04-15 22:40:14 +0000 | [diff] [blame] | 1229 | for (unsigned i = 0, e = Sequence.size(); i != e; ++i) |
| 1230 | if (!Sequence[i]) |
| 1231 | ++Noops; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1232 | assert(!AnyNotSched); |
Dan Gohman | 82b6673 | 2008-04-15 22:40:14 +0000 | [diff] [blame] | 1233 | assert(Sequence.size() + DeadNodes - Noops == SUnits.size() && |
Dan Gohman | 4370f26 | 2008-04-15 01:22:18 +0000 | [diff] [blame] | 1234 | "The number of nodes scheduled doesn't match the expected number!"); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1235 | #endif |
| 1236 | } |
| 1237 | |
| 1238 | |
| 1239 | |
| 1240 | //===----------------------------------------------------------------------===// |
| 1241 | // RegReductionPriorityQueue Implementation |
| 1242 | //===----------------------------------------------------------------------===// |
| 1243 | // |
| 1244 | // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers |
| 1245 | // to reduce register pressure. |
| 1246 | // |
| 1247 | namespace { |
| 1248 | template<class SF> |
| 1249 | class RegReductionPriorityQueue; |
| 1250 | |
| 1251 | /// Sorting functions for the Available queue. |
| 1252 | struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> { |
| 1253 | RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ; |
| 1254 | bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {} |
| 1255 | bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {} |
| 1256 | |
| 1257 | bool operator()(const SUnit* left, const SUnit* right) const; |
| 1258 | }; |
| 1259 | |
| 1260 | struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> { |
| 1261 | RegReductionPriorityQueue<td_ls_rr_sort> *SPQ; |
| 1262 | td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {} |
| 1263 | td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {} |
| 1264 | |
| 1265 | bool operator()(const SUnit* left, const SUnit* right) const; |
| 1266 | }; |
| 1267 | } // end anonymous namespace |
| 1268 | |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1269 | static inline bool isCopyFromLiveIn(const SUnit *SU) { |
| 1270 | SDNode *N = SU->Node; |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1271 | return N && N->getOpcode() == ISD::CopyFromReg && |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1272 | N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag; |
| 1273 | } |
| 1274 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1275 | namespace { |
| 1276 | template<class SF> |
Chris Lattner | 996795b | 2006-06-28 23:17:24 +0000 | [diff] [blame] | 1277 | class VISIBILITY_HIDDEN RegReductionPriorityQueue |
| 1278 | : public SchedulingPriorityQueue { |
Dan Gohman | a4db335 | 2008-06-21 18:35:25 +0000 | [diff] [blame] | 1279 | PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue; |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1280 | unsigned currentQueueId; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1281 | |
| 1282 | public: |
| 1283 | RegReductionPriorityQueue() : |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1284 | Queue(SF(this)), currentQueueId(0) {} |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1285 | |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1286 | virtual void initNodes(std::vector<SUnit> &sunits) {} |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1287 | |
| 1288 | virtual void addNode(const SUnit *SU) {} |
| 1289 | |
| 1290 | virtual void updateNode(const SUnit *SU) {} |
| 1291 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1292 | virtual void releaseState() {} |
| 1293 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1294 | virtual unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1295 | return 0; |
| 1296 | } |
| 1297 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1298 | unsigned size() const { return Queue.size(); } |
| 1299 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1300 | bool empty() const { return Queue.empty(); } |
| 1301 | |
| 1302 | void push(SUnit *U) { |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1303 | assert(!U->NodeQueueId && "Node in the queue already"); |
| 1304 | U->NodeQueueId = ++currentQueueId; |
Dan Gohman | a4db335 | 2008-06-21 18:35:25 +0000 | [diff] [blame] | 1305 | Queue.push(U); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1306 | } |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1307 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1308 | void push_all(const std::vector<SUnit *> &Nodes) { |
| 1309 | for (unsigned i = 0, e = Nodes.size(); i != e; ++i) |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1310 | push(Nodes[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1311 | } |
| 1312 | |
| 1313 | SUnit *pop() { |
Evan Cheng | d12c97d | 2006-05-30 18:05:39 +0000 | [diff] [blame] | 1314 | if (empty()) return NULL; |
Dan Gohman | a4db335 | 2008-06-21 18:35:25 +0000 | [diff] [blame] | 1315 | SUnit *V = Queue.top(); |
| 1316 | Queue.pop(); |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1317 | V->NodeQueueId = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1318 | return V; |
| 1319 | } |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1320 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1321 | void remove(SUnit *SU) { |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1322 | assert(!Queue.empty() && "Queue is empty!"); |
Dan Gohman | a4db335 | 2008-06-21 18:35:25 +0000 | [diff] [blame] | 1323 | assert(SU->NodeQueueId != 0 && "Not in queue!"); |
| 1324 | Queue.erase_one(SU); |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1325 | SU->NodeQueueId = 0; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1326 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1327 | }; |
| 1328 | |
Chris Lattner | 996795b | 2006-06-28 23:17:24 +0000 | [diff] [blame] | 1329 | class VISIBILITY_HIDDEN BURegReductionPriorityQueue |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1330 | : public RegReductionPriorityQueue<bu_ls_rr_sort> { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1331 | // SUnits - The SUnits for the current graph. |
| 1332 | const std::vector<SUnit> *SUnits; |
| 1333 | |
| 1334 | // SethiUllmanNumbers - The SethiUllman number for each node. |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1335 | std::vector<unsigned> SethiUllmanNumbers; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1336 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1337 | const TargetInstrInfo *TII; |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1338 | const TargetRegisterInfo *TRI; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1339 | ScheduleDAGRRList *scheduleDAG; |
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1340 | |
| 1341 | bool Fast; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1342 | public: |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1343 | explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii, |
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1344 | const TargetRegisterInfo *tri, |
| 1345 | bool f) |
| 1346 | : TII(tii), TRI(tri), scheduleDAG(NULL), Fast(f) {} |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1347 | |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1348 | void initNodes(std::vector<SUnit> &sunits) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1349 | SUnits = &sunits; |
| 1350 | // Add pseudo dependency edges for two-address nodes. |
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1351 | if (!Fast) |
| 1352 | AddPseudoTwoAddrDeps(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1353 | // Calculate node priorities. |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1354 | CalculateSethiUllmanNumbers(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1355 | } |
| 1356 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1357 | void addNode(const SUnit *SU) { |
| 1358 | SethiUllmanNumbers.resize(SUnits->size(), 0); |
| 1359 | CalcNodeSethiUllmanNumber(SU); |
| 1360 | } |
| 1361 | |
| 1362 | void updateNode(const SUnit *SU) { |
| 1363 | SethiUllmanNumbers[SU->NodeNum] = 0; |
| 1364 | CalcNodeSethiUllmanNumber(SU); |
| 1365 | } |
| 1366 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1367 | void releaseState() { |
| 1368 | SUnits = 0; |
| 1369 | SethiUllmanNumbers.clear(); |
| 1370 | } |
| 1371 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1372 | unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1373 | assert(SU->NodeNum < SethiUllmanNumbers.size()); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1374 | unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0; |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1375 | if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU)) |
| 1376 | // CopyFromReg should be close to its def because it restricts |
| 1377 | // allocation choices. But if it is a livein then perhaps we want it |
| 1378 | // closer to its uses so it can be coalesced. |
| 1379 | return 0xffff; |
| 1380 | else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
| 1381 | // CopyToReg should be close to its uses to facilitate coalescing and |
| 1382 | // avoid spilling. |
| 1383 | return 0; |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1384 | else if (Opc == TargetInstrInfo::EXTRACT_SUBREG || |
| 1385 | Opc == TargetInstrInfo::INSERT_SUBREG) |
| 1386 | // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to |
| 1387 | // facilitate coalescing. |
| 1388 | return 0; |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1389 | else if (SU->NumSuccs == 0) |
| 1390 | // If SU does not have a use, i.e. it doesn't produce a value that would |
| 1391 | // be consumed (e.g. store), then it terminates a chain of computation. |
| 1392 | // Give it a large SethiUllman number so it will be scheduled right |
| 1393 | // before its predecessors so that it doesn't lengthen their live ranges.
| 1394 | return 0xffff; |
| 1395 | else if (SU->NumPreds == 0) |
| 1396 | // If SU does not have a def, schedule it close to its uses because it |
| 1397 | // does not lengthen any live ranges. |
| 1398 | return 0; |
| 1399 | else |
| 1400 | return SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1401 | } |
| 1402 | |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1403 | void setScheduleDAG(ScheduleDAGRRList *scheduleDag) { |
| 1404 | scheduleDAG = scheduleDag; |
| 1405 | } |
| 1406 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1407 | private: |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1408 | bool canClobber(const SUnit *SU, const SUnit *Op); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1409 | void AddPseudoTwoAddrDeps(); |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1410 | void CalculateSethiUllmanNumbers(); |
| 1411 | unsigned CalcNodeSethiUllmanNumber(const SUnit *SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1412 | }; |
| 1413 | |
| 1414 | |
Dan Gohman | 54a187e | 2007-08-20 19:28:38 +0000 | [diff] [blame] | 1415 | class VISIBILITY_HIDDEN TDRegReductionPriorityQueue |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1416 | : public RegReductionPriorityQueue<td_ls_rr_sort> { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1417 | // SUnits - The SUnits for the current graph. |
| 1418 | const std::vector<SUnit> *SUnits; |
| 1419 | |
| 1420 | // SethiUllmanNumbers - The SethiUllman number for each node. |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1421 | std::vector<unsigned> SethiUllmanNumbers; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1422 | |
| 1423 | public: |
| 1424 | TDRegReductionPriorityQueue() {} |
| 1425 | |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1426 | void initNodes(std::vector<SUnit> &sunits) { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1427 | SUnits = &sunits; |
| 1428 | // Calculate node priorities. |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1429 | CalculateSethiUllmanNumbers(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1430 | } |
| 1431 | |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1432 | void addNode(const SUnit *SU) { |
| 1433 | SethiUllmanNumbers.resize(SUnits->size(), 0); |
| 1434 | CalcNodeSethiUllmanNumber(SU); |
| 1435 | } |
| 1436 | |
| 1437 | void updateNode(const SUnit *SU) { |
| 1438 | SethiUllmanNumbers[SU->NodeNum] = 0; |
| 1439 | CalcNodeSethiUllmanNumber(SU); |
| 1440 | } |
| 1441 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1442 | void releaseState() { |
| 1443 | SUnits = 0; |
| 1444 | SethiUllmanNumbers.clear(); |
| 1445 | } |
| 1446 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1447 | unsigned getNodePriority(const SUnit *SU) const { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1448 | assert(SU->NodeNum < SethiUllmanNumbers.size()); |
| 1449 | return SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1450 | } |
| 1451 | |
| 1452 | private: |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1453 | void CalculateSethiUllmanNumbers(); |
| 1454 | unsigned CalcNodeSethiUllmanNumber(const SUnit *SU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1455 | }; |
| 1456 | } |
| 1457 | |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1458 | /// closestSucc - Returns the scheduled cycle of the successor which is |
| 1459 | /// closest to the current cycle.
Evan Cheng | 2874855 | 2007-03-13 23:25:11 +0000 | [diff] [blame] | 1460 | static unsigned closestSucc(const SUnit *SU) { |
| 1461 | unsigned MaxCycle = 0; |
| 1462 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1463 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1464 | unsigned Cycle = I->Dep->Cycle; |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1465 | // If there are a bunch of CopyToRegs stacked up, they should be considered
| 1466 | // to be at the same position. |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1467 | if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg) |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1468 | Cycle = closestSucc(I->Dep)+1; |
Evan Cheng | b9e3db6 | 2007-03-14 22:43:40 +0000 | [diff] [blame] | 1469 | if (Cycle > MaxCycle) |
| 1470 | MaxCycle = Cycle; |
| 1471 | } |
Evan Cheng | 2874855 | 2007-03-13 23:25:11 +0000 | [diff] [blame] | 1472 | return MaxCycle; |
| 1473 | } |
| 1474 | |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1475 | /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
| 1476 | /// for scratch registers. Live-in operands and live-out results don't count |
| 1477 | /// since they are "fixed". |
| 1478 | static unsigned calcMaxScratches(const SUnit *SU) { |
| 1479 | unsigned Scratches = 0; |
| 1480 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1481 | I != E; ++I) { |
| 1482 | if (I->isCtrl) continue; // ignore chain preds |
Evan Cheng | 0e400d4 | 2008-01-09 23:01:55 +0000 | [diff] [blame] | 1483 | if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg) |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1484 | Scratches++; |
| 1485 | } |
| 1486 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1487 | I != E; ++I) { |
| 1488 | if (I->isCtrl) continue; // ignore chain succs |
Evan Cheng | 0e400d4 | 2008-01-09 23:01:55 +0000 | [diff] [blame] | 1489 | if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg) |
Evan Cheng | 61bc51e | 2007-12-20 02:22:36 +0000 | [diff] [blame] | 1490 | Scratches += 10; |
| 1491 | } |
| 1492 | return Scratches; |
| 1493 | } |
| 1494 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1495 | // Bottom up |
| 1496 | bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const { |
Evan Cheng | 99f2f79 | 2006-05-13 08:22:24 +0000 | [diff] [blame] | 1497 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1498 | unsigned LPriority = SPQ->getNodePriority(left); |
| 1499 | unsigned RPriority = SPQ->getNodePriority(right); |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1500 | if (LPriority != RPriority) |
| 1501 | return LPriority > RPriority; |
| 1502 | |
| 1503 | // Try schedule def + use closer when Sethi-Ullman numbers are the same. |
| 1504 | // e.g. |
| 1505 | // t1 = op t2, c1 |
| 1506 | // t3 = op t4, c2 |
| 1507 | // |
| 1508 | // and the following instructions are both ready. |
| 1509 | // t2 = op c3 |
| 1510 | // t4 = op c4 |
| 1511 | // |
| 1512 | // Then schedule t2 = op first. |
| 1513 | // i.e. |
| 1514 | // t4 = op c4 |
| 1515 | // t2 = op c3 |
| 1516 | // t1 = op t2, c1 |
| 1517 | // t3 = op t4, c2 |
| 1518 | // |
| 1519 | // This creates more short live intervals. |
| 1520 | unsigned LDist = closestSucc(left); |
| 1521 | unsigned RDist = closestSucc(right); |
| 1522 | if (LDist != RDist) |
| 1523 | return LDist < RDist; |
| 1524 | |
| 1525 | // Intuitively, it's good to push down instructions whose results are |
| 1526 | // liveout so their long live ranges won't conflict with other values |
| 1527 | // which are needed inside the BB. Further prioritize liveout instructions |
| 1528 | // by the number of operands which are calculated within the BB. |
| 1529 | unsigned LScratch = calcMaxScratches(left); |
| 1530 | unsigned RScratch = calcMaxScratches(right); |
| 1531 | if (LScratch != RScratch) |
| 1532 | return LScratch > RScratch; |
| 1533 | |
| 1534 | if (left->Height != right->Height) |
| 1535 | return left->Height > right->Height; |
| 1536 | |
| 1537 | if (left->Depth != right->Depth) |
| 1538 | return left->Depth < right->Depth; |
| 1539 | |
| 1540 | if (left->CycleBound != right->CycleBound) |
| 1541 | return left->CycleBound > right->CycleBound; |
| 1542 | |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1543 | assert(left->NodeQueueId && right->NodeQueueId && |
| 1544 | "NodeQueueId cannot be zero"); |
| 1545 | return (left->NodeQueueId > right->NodeQueueId); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1546 | } |
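
// A minimal sketch of the chained tie-break pattern the comparator above uses:
// each key decides the order only when it differs, otherwise the next key is
// consulted, and a unique queue id at the end makes the ordering a strict
// total order so exact ties resolve deterministically (FIFO). The real
// comparator chains several more keys (closest-successor distance, scratch
// estimate, height, depth, cycle bound) in the same way. The Item struct is
// an illustrative assumption, not an SUnit.
namespace sketch {
struct Item {
  unsigned Priority;   // e.g. a Sethi-Ullman number: smaller pops earlier
  unsigned QueueId;    // unique, monotonically increasing insertion id
};

// "Less" predicate for std::priority_queue: returning true orders L *after*
// R, so R pops first.
bool poppedAfter(const Item &L, const Item &R) {
  if (L.Priority != R.Priority)
    return L.Priority > R.Priority;   // smaller Priority pops first
  return L.QueueId > R.QueueId;       // earlier insertion pops first
}
} // namespace sketch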
| 1547 | |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1548 | bool |
| 1549 | BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1550 | if (SU->isTwoAddress) { |
| 1551 | unsigned Opc = SU->Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 1552 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1553 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 1554 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1555 | for (unsigned i = 0; i != NumOps; ++i) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1556 | if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1557 | SDNode *DU = SU->Node->getOperand(i).Val; |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1558 | if (DU->getNodeId() != -1 && |
| 1559 | Op->OrigNode == &(*SUnits)[DU->getNodeId()]) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1560 | return true; |
| 1561 | } |
| 1562 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1563 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1564 | return false; |
| 1565 | } |
| 1566 | |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1567 | |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1568 | /// hasCopyToRegUse - Return true if SU has a value successor that is a |
| 1569 | /// CopyToReg node. |
| 1570 | static bool hasCopyToRegUse(SUnit *SU) { |
| 1571 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1572 | I != E; ++I) { |
| 1573 | if (I->isCtrl) continue; |
| 1574 | SUnit *SuccSU = I->Dep; |
| 1575 | if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg) |
| 1576 | return true; |
| 1577 | } |
| 1578 | return false; |
| 1579 | } |
| 1580 | |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1581 | /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's |
Dan Gohman | ea04520 | 2008-06-21 22:05:24 +0000 | [diff] [blame] | 1582 | /// physical register defs. |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1583 | static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU, |
| 1584 | const TargetInstrInfo *TII, |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1585 | const TargetRegisterInfo *TRI) { |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1586 | SDNode *N = SuccSU->Node; |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 1587 | unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs(); |
| 1588 | const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs(); |
Dan Gohman | ea04520 | 2008-06-21 22:05:24 +0000 | [diff] [blame] | 1589 | assert(ImpDefs && "Caller should check hasPhysRegDefs"); |
Chris Lattner | b0d06b4 | 2008-01-07 03:13:06 +0000 | [diff] [blame] | 1590 | const unsigned *SUImpDefs = |
| 1591 | TII->get(SU->Node->getTargetOpcode()).getImplicitDefs(); |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1592 | if (!SUImpDefs) |
| 1593 | return false; |
| 1594 | for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { |
Duncan Sands | 13237ac | 2008-06-06 12:08:01 +0000 | [diff] [blame] | 1595 | MVT VT = N->getValueType(i); |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1596 | if (VT == MVT::Flag || VT == MVT::Other) |
| 1597 | continue; |
| 1598 | unsigned Reg = ImpDefs[i - NumDefs]; |
| 1599 | for (;*SUImpDefs; ++SUImpDefs) { |
| 1600 | unsigned SUReg = *SUImpDefs; |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1601 | if (TRI->regsOverlap(Reg, SUReg)) |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1602 | return true; |
| 1603 | } |
| 1604 | } |
| 1605 | return false; |
| 1606 | } |
| 1607 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1608 | /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses |
| 1609 | /// it as a def&use operand, add a pseudo control edge from it to the other
| 1610 | /// node (if it won't create a cycle) so the two-address one will be scheduled |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1611 | /// first (lower in the schedule). If both nodes are two-address, favor the |
| 1612 | /// one that has a CopyToReg use (more likely to be a loop induction update). |
| 1613 | /// If both are two-address, but one is commutable while the other is not |
| 1614 | /// commutable, favor the one that's not commutable. |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1615 | void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1616 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) { |
| 1617 | SUnit *SU = (SUnit *)&((*SUnits)[i]); |
| 1618 | if (!SU->isTwoAddress) |
| 1619 | continue; |
| 1620 | |
| 1621 | SDNode *Node = SU->Node; |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1622 | if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0) |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1623 | continue; |
| 1624 | |
| 1625 | unsigned Opc = Node->getTargetOpcode(); |
Chris Lattner | 03ad885 | 2008-01-07 07:27:27 +0000 | [diff] [blame] | 1626 | const TargetInstrDesc &TID = TII->get(Opc); |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1627 | unsigned NumRes = TID.getNumDefs(); |
Dan Gohman | 0340d1e | 2008-02-15 20:50:13 +0000 | [diff] [blame] | 1628 | unsigned NumOps = TID.getNumOperands() - NumRes; |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1629 | for (unsigned j = 0; j != NumOps; ++j) { |
Chris Lattner | fd2e338 | 2008-01-07 06:47:00 +0000 | [diff] [blame] | 1630 | if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1631 | SDNode *DU = SU->Node->getOperand(j).Val; |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1632 | if (DU->getNodeId() == -1) |
Evan Cheng | 1bf16631 | 2007-11-09 01:27:11 +0000 | [diff] [blame] | 1633 | continue; |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1634 | const SUnit *DUSU = &(*SUnits)[DU->getNodeId()]; |
Evan Cheng | f24d15f | 2006-11-06 21:33:46 +0000 | [diff] [blame] | 1635 | if (!DUSU) continue; |
Dan Gohman | 46520a2 | 2008-06-21 19:18:17 +0000 | [diff] [blame] | 1636 | for (SUnit::const_succ_iterator I = DUSU->Succs.begin(), |
| 1637 | E = DUSU->Succs.end(); I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1638 | if (I->isCtrl) continue; |
| 1639 | SUnit *SuccSU = I->Dep; |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1640 | if (SuccSU == SU) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1641 | continue; |
Evan Cheng | 2dbffa4 | 2007-11-06 08:44:59 +0000 | [diff] [blame] | 1642 | // Be conservative. Ignore if nodes aren't at roughly the same |
| 1643 | // depth and height. |
| 1644 | if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1) |
| 1645 | continue; |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1646 | if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode()) |
| 1647 | continue; |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1648 | // Don't constrain nodes with physical register defs if the |
Dan Gohman | cf8827a | 2008-01-29 12:43:50 +0000 | [diff] [blame] | 1649 | // predecessor can clobber them. |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1650 | if (SuccSU->hasPhysRegDefs) { |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1651 | if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI)) |
Evan Cheng | f989141 | 2007-12-20 09:25:31 +0000 | [diff] [blame] | 1652 | continue; |
| 1653 | } |
Evan Cheng | aa2d6ef | 2007-10-12 08:50:34 +0000 | [diff] [blame] | 1654 | // Don't constrain extract_subreg / insert_subreg; these may be
| 1655 | // coalesced away. We don't want them close to their uses.
| 1656 | unsigned SuccOpc = SuccSU->Node->getTargetOpcode(); |
| 1657 | if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG || |
| 1658 | SuccOpc == TargetInstrInfo::INSERT_SUBREG) |
| 1659 | continue; |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1660 | if ((!canClobber(SuccSU, DUSU) || |
Evan Cheng | a5e595d | 2007-09-28 22:32:30 +0000 | [diff] [blame] | 1661 | (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) || |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1662 | (!SU->isCommutable && SuccSU->isCommutable)) && |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1663 | !scheduleDAG->IsReachable(SuccSU, SU)) { |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1664 | DOUT << "Adding an edge from SU # " << SU->NodeNum |
| 1665 | << " to SU #" << SuccSU->NodeNum << "\n"; |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1666 | scheduleDAG->AddPred(SU, SuccSU, true, true); |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1667 | } |
| 1668 | } |
| 1669 | } |
| 1670 | } |
| 1671 | } |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1672 | } |
| 1673 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1674 | /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number. |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1675 | /// Smaller number is the higher priority. |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1676 | unsigned BURegReductionPriorityQueue:: |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1677 | CalcNodeSethiUllmanNumber(const SUnit *SU) { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1678 | unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1679 | if (SethiUllmanNumber != 0) |
| 1680 | return SethiUllmanNumber; |
| 1681 | |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1682 | unsigned Extra = 0; |
| 1683 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1684 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1685 | if (I->isCtrl) continue; // ignore chain preds |
| 1686 | SUnit *PredSU = I->Dep; |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1687 | unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU); |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1688 | if (PredSethiUllman > SethiUllmanNumber) { |
| 1689 | SethiUllmanNumber = PredSethiUllman; |
| 1690 | Extra = 0; |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1691 | } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1692 | ++Extra; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1693 | } |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1694 | |
| 1695 | SethiUllmanNumber += Extra; |
| 1696 | |
| 1697 | if (SethiUllmanNumber == 0) |
| 1698 | SethiUllmanNumber = 1; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1699 | |
| 1700 | return SethiUllmanNumber; |
| 1701 | } |
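
// For reference, a minimal standalone sketch of the textbook Sethi-Ullman
// labeling on a binary expression tree, which the per-SUnit numbering above
// approximates on a DAG (maximum over data predecessors, plus one for each
// additional predecessor whose label equals that maximum). The Expr struct is
// an illustrative assumption, not an LLVM type.
#include <algorithm>

namespace sketch {
struct Expr {
  const Expr *Left;    // null for a leaf
  const Expr *Right;   // null for a leaf
};

// Label = minimum number of registers needed to evaluate the tree when the
// subtree with the larger label is evaluated first.
unsigned sethiUllman(const Expr *E) {
  if (!E->Left && !E->Right)
    return 1;                                  // a leaf needs one register
  if (!E->Left || !E->Right)                   // unary node: same as its child
    return sethiUllman(E->Left ? E->Left : E->Right);
  unsigned L = sethiUllman(E->Left);
  unsigned R = sethiUllman(E->Right);
  return L == R ? L + 1 : std::max(L, R);      // classic Sethi-Ullman rule
}
} // namespace sketch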
| 1702 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1703 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
| 1704 | /// scheduling units. |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1705 | void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1706 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
| 1707 | |
| 1708 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1709 | CalcNodeSethiUllmanNumber(&(*SUnits)[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1710 | } |
| 1711 | |
Roman Levenstein | 30d0951 | 2008-03-27 09:44:37 +0000 | [diff] [blame] | 1712 | /// LimitedSumOfUnscheduledPredsOfSuccs - Count the unscheduled
Roman Levenstein | bc67450 | 2008-03-27 09:14:57 +0000 | [diff] [blame] | 1713 | /// predecessors of the successors of the SUnit SU. Stop when the provided |
| 1714 | /// limit is exceeded. |
Roman Levenstein | bc67450 | 2008-03-27 09:14:57 +0000 | [diff] [blame] | 1715 | static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU, |
| 1716 | unsigned Limit) { |
| 1717 | unsigned Sum = 0; |
| 1718 | for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); |
| 1719 | I != E; ++I) { |
| 1720 | SUnit *SuccSU = I->Dep; |
| 1721 | for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(), |
| 1722 | EE = SuccSU->Preds.end(); II != EE; ++II) { |
| 1723 | SUnit *PredSU = II->Dep; |
Evan Cheng | 16d7207 | 2008-03-29 18:34:22 +0000 | [diff] [blame] | 1724 | if (!PredSU->isScheduled) |
| 1725 | if (++Sum > Limit) |
| 1726 | return Sum; |
Roman Levenstein | bc67450 | 2008-03-27 09:14:57 +0000 | [diff] [blame] | 1727 | } |
| 1728 | } |
| 1729 | return Sum; |
| 1730 | } |
| 1731 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1732 | |
| 1733 | // Top down |
| 1734 | bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const { |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1735 | unsigned LPriority = SPQ->getNodePriority(left); |
| 1736 | unsigned RPriority = SPQ->getNodePriority(right); |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1737 | bool LIsTarget = left->Node && left->Node->isTargetOpcode(); |
| 1738 | bool RIsTarget = right->Node && right->Node->isTargetOpcode(); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1739 | bool LIsFloater = LIsTarget && left->NumPreds == 0; |
| 1740 | bool RIsFloater = RIsTarget && right->NumPreds == 0; |
Roman Levenstein | bc67450 | 2008-03-27 09:14:57 +0000 | [diff] [blame] | 1741 | unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0; |
| 1742 | unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1743 | |
| 1744 | if (left->NumSuccs == 0 && right->NumSuccs != 0) |
| 1745 | return false; |
| 1746 | else if (left->NumSuccs != 0 && right->NumSuccs == 0) |
| 1747 | return true; |
| 1748 | |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1749 | if (LIsFloater) |
| 1750 | LBonus -= 2; |
| 1751 | if (RIsFloater) |
| 1752 | RBonus -= 2; |
| 1753 | if (left->NumSuccs == 1) |
| 1754 | LBonus += 2; |
| 1755 | if (right->NumSuccs == 1) |
| 1756 | RBonus += 2; |
| 1757 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1758 | if (LPriority+LBonus != RPriority+RBonus) |
| 1759 | return LPriority+LBonus < RPriority+RBonus; |
Anton Korobeynikov | 035eaac | 2008-02-20 11:10:28 +0000 | [diff] [blame] | 1760 | |
Evan Cheng | 73bdf04 | 2008-03-01 00:39:47 +0000 | [diff] [blame] | 1761 | if (left->Depth != right->Depth) |
| 1762 | return left->Depth < right->Depth; |
| 1763 | |
| 1764 | if (left->NumSuccsLeft != right->NumSuccsLeft) |
| 1765 | return left->NumSuccsLeft > right->NumSuccsLeft; |
| 1766 | |
| 1767 | if (left->CycleBound != right->CycleBound) |
| 1768 | return left->CycleBound > right->CycleBound; |
| 1769 | |
Roman Levenstein | 6b37114 | 2008-04-29 09:07:59 +0000 | [diff] [blame] | 1770 | assert(left->NodeQueueId && right->NodeQueueId && |
| 1771 | "NodeQueueId cannot be zero"); |
| 1772 | return (left->NodeQueueId > right->NodeQueueId); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1773 | } |
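// A minimal sketch, assuming nothing about the real scheduling classes, of how
// a comparator with these semantics drives std::priority_queue: returning true
// means the left operand is "less" and is popped later, and a queue id
// assigned in push order breaks ties so the ordering stays strict and
// deterministic, much like the NodeQueueId check above.  ToyUnit, ToyCompare,
// and the "larger score pops first" convention are assumptions of this sketch.
#include <queue>
#include <vector>

namespace toy_queue_sketch {
  struct ToyUnit {
    unsigned Score;    // toy priority: larger means "schedule sooner"
    unsigned QueueId;  // assigned when the unit is pushed
  };

  struct ToyCompare {
    bool operator()(const ToyUnit *L, const ToyUnit *R) const {
      if (L->Score != R->Score)
        return L->Score < R->Score;    // larger score is popped first
      return L->QueueId > R->QueueId;  // on ties, the older entry wins
    }
  };

  typedef std::priority_queue<ToyUnit*, std::vector<ToyUnit*>, ToyCompare>
      ToyPriorityQueue;
} // end namespace toy_queue_sketch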
| 1774 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1775 | /// CalcNodeSethiUllmanNumber - Priority is the Sethi-Ullman number.
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1776 | /// A smaller number means a higher priority.
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1777 | unsigned TDRegReductionPriorityQueue:: |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1778 | CalcNodeSethiUllmanNumber(const SUnit *SU) { |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1779 | unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum]; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1780 | if (SethiUllmanNumber != 0) |
| 1781 | return SethiUllmanNumber; |
| 1782 | |
Evan Cheng | 8e136a9 | 2007-09-26 21:36:17 +0000 | [diff] [blame] | 1783 | unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1784 | if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1785 | SethiUllmanNumber = 0xffff; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1786 | else if (SU->NumSuccsLeft == 0) |
| 1787 | // If SU does not have a use, i.e. it doesn't produce a value that would |
| 1788 | // be consumed (e.g. store), then it terminates a chain of computation. |
Chris Lattner | 296a83c | 2007-02-01 04:55:59 +0000 | [diff] [blame] | 1789 | // Give it a small Sethi-Ullman number so it will be scheduled right before
 | 1790 | // its predecessors, so that it does not lengthen their live ranges.
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1791 | SethiUllmanNumber = 0; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1792 | else if (SU->NumPredsLeft == 0 && |
| 1793 | (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU))) |
Evan Cheng | 961bbd3 | 2007-01-08 23:50:38 +0000 | [diff] [blame] | 1794 | SethiUllmanNumber = 0xffff; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1795 | else { |
| 1796 | int Extra = 0; |
Chris Lattner | d86418a | 2006-08-17 00:09:56 +0000 | [diff] [blame] | 1797 | for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); |
| 1798 | I != E; ++I) { |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1799 | if (I->isCtrl) continue; // ignore chain preds |
| 1800 | SUnit *PredSU = I->Dep; |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1801 | unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1802 | if (PredSethiUllman > SethiUllmanNumber) { |
| 1803 | SethiUllmanNumber = PredSethiUllman; |
| 1804 | Extra = 0; |
Evan Cheng | 0effc3a | 2007-09-19 01:38:40 +0000 | [diff] [blame] | 1805 | } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl) |
Evan Cheng | 5924bf7 | 2007-09-25 01:54:36 +0000 | [diff] [blame] | 1806 | ++Extra; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1807 | } |
| 1808 | |
| 1809 | SethiUllmanNumber += Extra; |
| 1810 | } |
| 1811 | |
| 1812 | return SethiUllmanNumber; |
| 1813 | } |
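// For intuition only: the textbook Sethi-Ullman recurrence on a binary
// expression tree, which the DAG-oriented numbering above adapts (adding the
// Extra term for equal-numbered predecessors and special-casing copies).  A
// leaf needs one register; an interior node needs the larger child number if
// the children differ, and one more than it if they are equal, because both
// subtree results must be live at the same time.  ToyExpr is an assumption of
// this sketch, not a scheduler type.
namespace toy_sethi_ullman_sketch {
  struct ToyExpr {
    const ToyExpr *LHS;  // null for a leaf
    const ToyExpr *RHS;  // null for a leaf
  };

  static unsigned classicSethiUllman(const ToyExpr *E) {
    if (!E->LHS && !E->RHS)
      return 1;                              // a leaf fits in one register
    unsigned L = E->LHS ? classicSethiUllman(E->LHS) : 0;
    unsigned R = E->RHS ? classicSethiUllman(E->RHS) : 0;
    return (L == R) ? L + 1 : (L > R ? L : R);
  }
  // Worked example: in (a + b) * (c + d) each addition scores 2, so the
  // multiply scores 3 -- whichever side is evaluated first, one register must
  // hold its result while the other side is computed.
} // end namespace toy_sethi_ullman_sketch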
| 1814 | |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1815 | /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all |
| 1816 | /// scheduling units. |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1817 | void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() { |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1818 | SethiUllmanNumbers.assign(SUnits->size(), 0); |
| 1819 | |
| 1820 | for (unsigned i = 0, e = SUnits->size(); i != e; ++i) |
Evan Cheng | 6730f03 | 2007-01-08 23:55:53 +0000 | [diff] [blame] | 1821 | CalcNodeSethiUllmanNumber(&(*SUnits)[i]); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1822 | } |
| 1823 | |
| 1824 | //===----------------------------------------------------------------------===// |
| 1825 | // Public Constructor Functions |
| 1826 | //===----------------------------------------------------------------------===// |
| 1827 | |
Jim Laskey | 03593f7 | 2006-08-01 18:29:48 +0000 | [diff] [blame] | 1828 | llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, |
| 1829 | SelectionDAG *DAG, |
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1830 | MachineBasicBlock *BB, |
| 1831 | bool Fast) { |
Evan Cheng | fd2c5dd | 2006-11-04 09:44:31 +0000 | [diff] [blame] | 1832 | const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo(); |
Dan Gohman | 3a4be0f | 2008-02-10 18:45:23 +0000 | [diff] [blame] | 1833 | const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo(); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1834 | |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1835 | BURegReductionPriorityQueue *priorityQueue = |
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1836 | new BURegReductionPriorityQueue(TII, TRI, Fast); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1837 | |
 | 1838 | ScheduleDAGRRList *scheduleDAG =
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1839 | new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, Fast,priorityQueue); |
Roman Levenstein | 7e71b4b | 2008-03-26 09:18:09 +0000 | [diff] [blame] | 1840 | priorityQueue->setScheduleDAG(scheduleDAG); |
| 1841 | return scheduleDAG; |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1842 | } |
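// A minimal sketch of the wiring pattern used by the factory above, with
// hypothetical toy classes: the priority queue is created first, handed to the
// scheduler's constructor, and then given a back-pointer to the scheduler,
// which resolves the mutual dependency between the two objects.
namespace toy_wiring_sketch {
  class ToyScheduler;  // forward declaration breaks the cycle at compile time

  class ToyQueue {
    ToyScheduler *Scheduler;
  public:
    ToyQueue() : Scheduler(0) {}
    void setScheduler(ToyScheduler *S) { Scheduler = S; }
    ToyScheduler *getScheduler() const { return Scheduler; }
  };

  class ToyScheduler {
    ToyQueue *Queue;
  public:
    explicit ToyScheduler(ToyQueue *Q) : Queue(Q) {}
    ToyQueue *getQueue() const { return Queue; }
  };

  static ToyScheduler *createToyScheduler() {
    ToyQueue *Q = new ToyQueue();           // step 1: the queue
    ToyScheduler *S = new ToyScheduler(Q);  // step 2: the scheduler using it
    Q->setScheduler(S);                     // step 3: close the cycle
    return S;                               // the caller manages both objects
  }
} // end namespace toy_wiring_sketch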
| 1843 | |
Jim Laskey | 03593f7 | 2006-08-01 18:29:48 +0000 | [diff] [blame] | 1844 | llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, |
| 1845 | SelectionDAG *DAG, |
Evan Cheng | 2c97731 | 2008-07-01 18:05:03 +0000 | [diff] [blame^] | 1846 | MachineBasicBlock *BB, |
| 1847 | bool Fast) { |
| 1848 | return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, Fast, |
Dan Gohman | 4b49be1 | 2008-06-21 01:08:22 +0000 | [diff] [blame] | 1849 | new TDRegReductionPriorityQueue()); |
Evan Cheng | d38c22b | 2006-05-11 23:55:42 +0000 | [diff] [blame] | 1850 | } |
| 1851 | |