//===----- ScheduleDAGFast.cpp - Fast poor list scheduler -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a fast scheduler.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SchedulerRegistry.h"
#include "InstrEmitter.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical copies");

static RegisterScheduler
  fastDAGScheduler("fast", "Fast suboptimal list scheduling",
                   createFastDAGScheduler);
static RegisterScheduler
  linearizeDAGScheduler("linearize", "Linearize DAG, no scheduling",
                        createDAGLinearizer);


namespace {
  /// FastPriorityQueue - A degenerate priority queue that considers
  /// all nodes to have the same priority.
  ///
  struct FastPriorityQueue {
    SmallVector<SUnit *, 16> Queue;

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      Queue.push_back(U);
    }

    SUnit *pop() {
      if (empty()) return nullptr;
      SUnit *V = Queue.back();
      Queue.pop_back();
      return V;
    }
  };

//===----------------------------------------------------------------------===//
/// ScheduleDAGFast - The actual "fast" list scheduler implementation.
///
class ScheduleDAGFast : public ScheduleDAGSDNodes {
private:
  /// AvailableQueue - The priority queue to use for the available SUnits.
  FastPriorityQueue AvailableQueue;

  /// LiveRegDefs - A map from physical registers to the SUnits that define
  /// them and are currently "live". These nodes must be scheduled before any
  /// other node that modifies one of the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGFast(MachineFunction &mf)
    : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  /// AddPred - Adds a predecessor edge to SUnit SU.
  void AddPred(SUnit *SU, const SDep &D) {
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  void RemovePred(SUnit *SU, const SDep &D) {
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);
  void ListScheduleBottomUp();

  /// forceUnitLatencies - The fast scheduler doesn't care about real latencies.
  bool forceUnitLatencies() const override { return true; }
};
} // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGFast::Schedule() {
  DEBUG(dbgs() << "********** List Scheduling **********\n");

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), nullptr);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGFast::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue.push(PredSU);
  }
}

void ScheduleDAGFast::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGFast::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = nullptr;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGFast::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getGluedNode())
    return nullptr;

  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
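  // Inspect the node's results: a glue result means the node cannot be
  // duplicated at all, while a chain result suggests a folded memory operand
  // that the unfolding path below may be able to split back out.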
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

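  // Try to split the folded load back out of the node: unfoldMemoryOperand
  // produces a separate load (NewNodes[0]) and the remaining operation
  // (NewNodes[1]), each of which then gets its own SUnit and rewired edges.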
  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    DEBUG(dbgs() << "Unfolding SU # " << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
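    // Redirect all uses of the old node: its value results now come from the
    // unfolded operation N, and its old chain result comes from the new
    // load's chain output.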
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    SUnit *NewSU = newSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but has a different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = newSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
    }

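    // Partition the old node's edges so they can be rewired: the chain
    // predecessor and the load's operands move to LoadSU, the remaining
    // predecessors and the data successors move to NewSU, and chain
    // successors are redirected to the load.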
    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPred = *I;
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad) {
        AddPred(LoadSU, Pred);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad) {
      SDep D(LoadSU, SDep::Barrier);
      D.setLatency(LoadSU->Latency);
      AddPred(NewSU, D);
    }

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "Duplicating SU # " << SU->NodeNum << "\n");
  NewSU = Clone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGFast::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVectorImpl<SUnit*> &Copies) {
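  // Model the workaround as two back-to-back copies: CopyFromSU copies the
  // value out of the physical register class into DestRC, and CopyToSU copies
  // it back. Scheduled successors of SU are then rewired onto CopyToSU.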
  SUnit *CopyFromSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = newSUnit(static_cast<SDNode *>(nullptr));
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    RemovePred(DelDeps[i].first, DelDeps[i].second);
  }
  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
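  // Physical register results follow the explicit defs in the node's value
  // list, so walk the implicit-def list until Reg is found to compute the
  // index of its result value.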
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
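  // Check Reg and every register that aliases it; a live def recorded for a
  // different SUnit means scheduling SU now would clobber a value that is
  // still needed.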
  for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI) {
    if (LiveRegDefs[*AI] && LiveRegDefs[*AI] != SU) {
      if (RegAdded.insert(*AI)) {
        LRegs.push_back(*AI);
        Added = true;
      }
    }
  }
  return Added;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU,
                                              SmallVectorImpl<unsigned> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

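      // Inline asm operands come in groups: a flag word followed by NumVals
      // register operands. Scan each def, earlyclobber, or clobber group and
      // record any physical register it would overwrite.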
      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }
    if (!Node->isMachineOpcode())
      continue;
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg) {
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
    }
  }
  return !LRegs.empty();
}


/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGFast::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue.push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
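  // NotReady collects nodes that had to be postponed because of physical
  // register interference; LRegsMap records which registers blocked each one.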
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue.pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue.pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try code duplication or inserting cross class copies
    // to resolve them.
    if (Delayed && !CurSU) {
      if (!CurSU) {
        // Try duplicating the nodes that produce these
        // "expensive to copy" values to break the dependency. In case even
        // that doesn't work, insert cross class copies.
        SUnit *TrySU = NotReady[0];
        SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
        const TargetRegisterClass *RC =
          TRI->getMinimalPhysRegClass(Reg, VT);
        const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

        // If the cross copy register class is the same as RC, then it must be
        // possible to copy the value directly. Do not try to duplicate the def.
        // If the cross copy register class is not the same as RC, then it's
        // possible to copy the value, but it requires cross register class
        // copies and it is expensive.
        // If the cross copy register class is null, then it's not possible to
        // copy the value at all.
        SUnit *NewDef = nullptr;
        if (DestRC != RC) {
          NewDef = CopyAndMoveSuccessors(LRDef);
          if (!DestRC && !NewDef)
            report_fatal_error("Can't handle live physical "
                               "register dependency!");
        }
        if (!NewDef) {
          // Issue copies; these can be expensive cross register class copies.
          SmallVector<SUnit*, 2> Copies;
          InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DEBUG(dbgs() << "Adding an edge from SU # " << TrySU->NodeNum
                       << " to SU #" << Copies.front()->NodeNum << "\n");
          AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
          NewDef = Copies.back();
        }

        DEBUG(dbgs() << "Adding an edge from SU # " << NewDef->NodeNum
                     << " to SU #" << TrySU->NodeNum << "\n");
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, SDep(TrySU, SDep::Artificial));
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      if (!CurSU) {
        llvm_unreachable("Unable to resolve live physical register dependencies!");
      }
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue.push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order since it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}


namespace {
//===----------------------------------------------------------------------===//
// ScheduleDAGLinearize - No scheduling scheduler; it simply linearizes the
// DAG in topological order.
// IMPORTANT: this may not work for targets with physreg dependencies.
//
class ScheduleDAGLinearize : public ScheduleDAGSDNodes {
public:
  ScheduleDAGLinearize(MachineFunction &mf) : ScheduleDAGSDNodes(mf) {}

  void Schedule() override;

  MachineBasicBlock *
    EmitSchedule(MachineBasicBlock::iterator &InsertPos) override;

private:
  std::vector<SDNode*> Sequence;
  DenseMap<SDNode*, SDNode*> GluedMap;  // Cache glue to its user

  void ScheduleNode(SDNode *N);
};
} // end anonymous namespace

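/// ScheduleNode - Append N to the schedule and release its operands. Node ids
/// are used to count unscheduled uses: an id of 0 marks a node as ready, and
/// an operand is scheduled as soon as its remaining-use count drops to zero.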
| 663 | void ScheduleDAGLinearize::ScheduleNode(SDNode *N) { |
| 664 | if (N->getNodeId() != 0) |
Stephen Hines | dce4a40 | 2014-05-29 02:49:00 -0700 | [diff] [blame^] | 665 | llvm_unreachable(nullptr); |
Evan Cheng | d4f7596 | 2012-10-17 19:39:36 +0000 | [diff] [blame] | 666 | |
| 667 | if (!N->isMachineOpcode() && |
| 668 | (N->getOpcode() == ISD::EntryToken || isPassiveNode(N))) |
| 669 | // These nodes do not need to be translated into MIs. |
| 670 | return; |
| 671 | |
| 672 | DEBUG(dbgs() << "\n*** Scheduling: "); |
| 673 | DEBUG(N->dump(DAG)); |
| 674 | Sequence.push_back(N); |
| 675 | |
| 676 | unsigned NumOps = N->getNumOperands(); |
| 677 | if (unsigned NumLeft = NumOps) { |
Stephen Hines | dce4a40 | 2014-05-29 02:49:00 -0700 | [diff] [blame^] | 678 | SDNode *GluedOpN = nullptr; |
Evan Cheng | d4f7596 | 2012-10-17 19:39:36 +0000 | [diff] [blame] | 679 | do { |
| 680 | const SDValue &Op = N->getOperand(NumLeft-1); |
| 681 | SDNode *OpN = Op.getNode(); |
| 682 | |
| 683 | if (NumLeft == NumOps && Op.getValueType() == MVT::Glue) { |
| 684 | // Schedule glue operand right above N. |
| 685 | GluedOpN = OpN; |
| 686 | assert(OpN->getNodeId() != 0 && "Glue operand not ready?"); |
| 687 | OpN->setNodeId(0); |
| 688 | ScheduleNode(OpN); |
| 689 | continue; |
| 690 | } |
| 691 | |
| 692 | if (OpN == GluedOpN) |
| 693 | // Glue operand is already scheduled. |
| 694 | continue; |
| 695 | |
| 696 | DenseMap<SDNode*, SDNode*>::iterator DI = GluedMap.find(OpN); |
| 697 | if (DI != GluedMap.end() && DI->second != N) |
| 698 | // Users of glues are counted against the glued users. |
| 699 | OpN = DI->second; |
| 700 | |
| 701 | unsigned Degree = OpN->getNodeId(); |
| 702 | assert(Degree > 0 && "Predecessor over-released!"); |
| 703 | OpN->setNodeId(--Degree); |
| 704 | if (Degree == 0) |
| 705 | ScheduleNode(OpN); |
| 706 | } while (--NumLeft); |
| 707 | } |
| 708 | } |

/// findGluedUser - Find the representative use of a glue value by walking
/// the use chain.
static SDNode *findGluedUser(SDNode *N) {
  while (SDNode *Glued = N->getGluedUser())
    N = Glued;
  return N;
}

void ScheduleDAGLinearize::Schedule() {
  DEBUG(dbgs() << "********** DAG Linearization **********\n");

  SmallVector<SDNode*, 8> Glues;
  unsigned DAGSize = 0;
  for (SelectionDAG::allnodes_iterator I = DAG->allnodes_begin(),
         E = DAG->allnodes_end(); I != E; ++I) {
    SDNode *N = I;

    // Use node id to record degree.
    unsigned Degree = N->use_size();
    N->setNodeId(Degree);
    unsigned NumVals = N->getNumValues();
    if (NumVals && N->getValueType(NumVals-1) == MVT::Glue &&
        N->hasAnyUseOfValue(NumVals-1)) {
      SDNode *User = findGluedUser(N);
      if (User) {
        Glues.push_back(N);
        GluedMap.insert(std::make_pair(N, User));
      }
    }

    if (N->isMachineOpcode() ||
        (N->getOpcode() != ISD::EntryToken && !isPassiveNode(N)))
      ++DAGSize;
  }

  for (unsigned i = 0, e = Glues.size(); i != e; ++i) {
    SDNode *Glue = Glues[i];
    SDNode *GUser = GluedMap[Glue];
    unsigned Degree = Glue->getNodeId();
    unsigned UDegree = GUser->getNodeId();

    // Glue user must be scheduled together with the glue operand. So other
    // users of the glue operand must be treated as its users.
    SDNode *ImmGUser = Glue->getGluedUser();
    for (SDNode::use_iterator ui = Glue->use_begin(), ue = Glue->use_end();
         ui != ue; ++ui)
      if (*ui == ImmGUser)
        --Degree;
    GUser->setNodeId(UDegree + Degree);
    Glue->setNodeId(1);
  }

  Sequence.reserve(DAGSize);
  ScheduleNode(DAG->getRoot().getNode());
}

MachineBasicBlock*
ScheduleDAGLinearize::EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;

  DEBUG({
    dbgs() << "\n*** Final schedule ***\n";
  });

  // FIXME: Handle dbg_values.
  unsigned NumNodes = Sequence.size();
  for (unsigned i = 0; i != NumNodes; ++i) {
    SDNode *N = Sequence[NumNodes-i-1];
    DEBUG(N->dump(DAG));
    Emitter.EmitNode(N, false, false, VRBaseMap);
  }

  DEBUG(dbgs() << '\n');

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createFastDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGFast(*IS->MF);
}

llvm::ScheduleDAGSDNodes *
llvm::createDAGLinearizer(SelectionDAGISel *IS, CodeGenOpt::Level) {
  return new ScheduleDAGLinearize(*IS->MF);
}