//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "SDNodeDbgValue.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"

STATISTIC(LoadsClustered, "Number of loads clustered together");

// This allows the latency-based scheduler to notice high latency instructions
// without a target itinerary. The choice of number here has more to do with
// balancing scheduler heuristics than with the actual machine latency.
static cl::opt<int> HighLatencyCycles(
  "sched-high-latency-cycles", cl::Hidden, cl::init(10),
  cl::desc("Roughly estimate the number of cycles that 'long latency' "
           "instructions take for targets with no itinerary"));

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
    : ScheduleDAG(mf), BB(nullptr), DAG(nullptr),
      InstrItins(mf.getSubtarget().getInstrItineraryData()) {}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb) {
  BB = bb;
  DAG = dag;

  // Clear the scheduler's SUnit DAG.
  ScheduleDAG::clearDAG();
  Sequence.clear();

  // Invoke the target's selection of scheduler.
  Schedule();
}

/// newSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::newSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = nullptr;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == nullptr || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (!N ||
      (N->isMachineOpcode() &&
       N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = newSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isVRegCycle = Old->isVRegCycle;
  SU->isCall = Old->isCall;
  SU->isCallOp = Old->isCallOp;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->isScheduleHigh = Old->isScheduleHigh;
  SU->isScheduleLow = Old->isScheduleLow;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
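  // Only the value operand (operand 2) of a CopyToReg that targets a physical
  // register can give rise to a physical register dependency.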
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->getOpcode() == ISD::CopyFromReg &&
      cast<RegisterSDNode>(Def->getOperand(1))->getReg() == Reg) {
    PhysReg = Reg;
  } else if (Def->isMachineOpcode()) {
    const MCInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg)
      PhysReg = Reg;
  }

  if (PhysReg != 0) {
    const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, Def->getSimpleValueType(ResNo));
    Cost = RC->getCopyCost();
  }
}

// Helper for AddGlue to clone node operands.
static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG,
                                SmallVectorImpl<EVT> &VTs,
                                SDValue ExtraOper = SDValue()) {
  SmallVector<SDValue, 8> Ops;
  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
    Ops.push_back(N->getOperand(I));

  if (ExtraOper.getNode())
    Ops.push_back(ExtraOper);

  SDVTList VTList = DAG->getVTList(VTs);
  MachineSDNode::mmo_iterator Begin = nullptr, End = nullptr;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, Ops);

  // Reset the memory references
  if (MN)
    MN->setMemRefs(Begin, End);
}

static bool AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  SDNode *GlueDestNode = Glue.getNode();

  // Don't add glue from a node to itself.
  if (GlueDestNode == N) return false;

  // Don't add a glue operand to something that already uses glue.
  if (GlueDestNode &&
      N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
    return false;
  }
  // Don't add glue to something that already has a glue value.
  if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return false;

  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    VTs.push_back(N->getValueType(I));

  if (AddGlue)
    VTs.push_back(MVT::Glue);

  CloneNodeWithValues(N, DAG, VTs, Glue);

  return true;
}

// Cleanup after unsuccessful AddGlue. Use the standard method of morphing the
// node even though simply shrinking the value list is sufficient.
static void RemoveUnusedGlue(SDNode *N, SelectionDAG *DAG) {
  assert((N->getValueType(N->getNumValues() - 1) == MVT::Glue &&
          !N->hasAnyUseOfValue(N->getNumValues() - 1)) &&
         "expected an unused glue value");

  SmallVector<EVT, 4> VTs;
  for (unsigned I = 0, E = N->getNumValues()-1; I != E; ++I)
    VTs.push_back(N->getValueType(I));

  CloneNodeWithValues(N, DAG, VTs);
}

/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = nullptr;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  // This algorithm requires a reasonably low use count before finding a match
  // to avoid uselessly blowing up compile time in large blocks.
  unsigned UseCount = 0;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E && UseCount < 100; ++I, ++UseCount) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User).second)
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
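    // Track the lowest-offset load seen so far as the base of the cluster.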
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
    // Reset UseCount to allow more matches.
    UseCount = 0;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset, NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  SDValue InGlue = SDValue(nullptr, 0);
  if (AddGlue(Lead, InGlue, true, DAG))
    InGlue = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutGlue = I < E - 1;
    SDNode *Load = Loads[I];

    // If AddGlue fails, we could leave an unused glue value. This should not
    // cause any problems.
    if (AddGlue(Load, InGlue, OutGlue, DAG)) {
      if (OutGlue)
        InGlue = SDValue(Load, Load->getNumValues() - 1);

      ++LoadsClustered;
    }
    else if (!OutGlue && InGlue.getNode())
      RemoveUnusedGlue(InGlue.getNode(), DAG);
  }
}

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    if (MCID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  SmallVector<SUnit*, 8> CallSUnits;
  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()).second)
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI)) // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = newSUnit(NI);

    // See if anything is glued to this node, if so, add them to glued
    // nodes. Nodes can have at most one glue input and one glue output. Glue
    // is required to be the last operand and result of a node.

    // Scan up to find glued preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any glued succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
      SDValue GlueVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Glue result.
      bool HasGlueUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (GlueVal.isOperandOf(*UI)) {
          HasGlueUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasGlueUse) break;
    }

    if (NodeSUnit->isCall)
      CallSUnits.push_back(NodeSUnit);

    // Schedule zero-latency TokenFactor below any nodes that may increase the
    // schedule height. Otherwise, ancestors of the TokenFactor may appear to
    // have false stalls.
    if (NI->getOpcode() == ISD::TokenFactor)
      NodeSUnit->isScheduleLow = true;

    // If there are glue operands involved, N is now the bottom-most node
    // of the sequence of nodes that are glued together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
    InitNumRegDefsLeft(NodeSUnit);

    // Assign the Latency field of NodeSUnit using target-provided information.
    computeLatency(NodeSUnit);
  }

  // Find all call operands.
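  // For each CopyToReg glued into a call sequence, mark the SUnit that
  // computes the copied value (an outgoing argument) as a call operand.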
  while (!CallSUnits.empty()) {
    SUnit *SU = CallSUnits.pop_back_val();
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->getOpcode() != ISD::CopyToReg)
        continue;
      SDNode *SrcN = SUNode->getOperand(2).getNode();
      if (isPassiveNode(SrcN)) continue; // Not scheduled.
      SUnit *SrcSU = &SUnits[SrcN->getNodeId()];
      SrcSU->isCallOp = true;
    }
  }
}

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtargetInfo &ST = MF.getSubtarget();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = forceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const MCInstrDesc &MCID = TII->get(Opc);
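      // An operand with a TIED_TO constraint must use the same register as
      // one of the defs, which makes this a two-address instruction.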
      for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
        if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (MCID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed; // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue; // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue; // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
        // emits a copy from the physical register to a virtual register unless
        // it requires a cross class copy (cost < 0). That means we are only
        // treating "expensive to copy" register dependency as physical register
        // dependency. This may change in the future though.
        if (Cost >= 0 && !StressSched)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        // Special-case TokenFactor chains as zero-latency.
        if (isChain && OpN->getOpcode() == ISD::TokenFactor)
          OpLatency = 0;

        SDep Dep = isChain ? SDep(OpSU, SDep::Barrier)
                           : SDep(OpSU, SDep::Data, PhysReg);
        Dep.setLatency(OpLatency);
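        // For real data dependencies, refine the latency using per-operand
        // itinerary information and let the subtarget adjust the edge.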
        if (!isChain && !UnitLatencies) {
          computeOperandLatency(OpN, N, i, Dep);
          ST.adjustSchedDependency(OpSU, SU, Dep);
        }

        if (!SU->addPred(Dep) && !Dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
          // Multiple register uses are combined in the same SUnit. For example,
          // we could have a set of glued nodes with all their defs consumed by
          // another set of glued nodes. Register pressure tracking sees this as
          // a single use, so to keep pressure balanced we reduce the defs.
          //
          // We can't tell (without more book-keeping) if this results from
          // glued nodes or duplicate operands. As long as we don't reduce
          // NumRegDefsLeft to zero, we handle the common cases well.
          --OpSU->NumRegDefsLeft;
        }
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// glued together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

// Initialize NumNodeDefs for the current Node's opcode.
void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
  // Check for phys reg copy.
  if (!Node)
    return;

  if (!Node->isMachineOpcode()) {
    if (Node->getOpcode() == ISD::CopyFromReg)
      NodeNumDefs = 1;
    else
      NodeNumDefs = 0;
    return;
  }
  unsigned POpc = Node->getMachineOpcode();
  if (POpc == TargetOpcode::IMPLICIT_DEF) {
    // No register need be allocated for this.
    NodeNumDefs = 0;
    return;
  }
  if (POpc == TargetOpcode::PATCHPOINT &&
      Node->getValueType(0) == MVT::Other) {
    // PATCHPOINT is defined to have one result, but it might really have none
    // if we're not using CallingConv::AnyReg. Don't mistake the chain for a
    // real definition.
    NodeNumDefs = 0;
    return;
  }
  unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
  // Some instructions define regs that are not represented in the selection DAG
  // (e.g. unused flags). See tMOVi8. Make sure we don't access past NumValues.
  NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
  DefIdx = 0;
}

// Construct a RegDefIter for this SUnit and find the first valid value.
ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
                                           const ScheduleDAGSDNodes *SD)
  : SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
  InitNodeNumDefs();
  Advance();
}

// Advance to the next valid value defined by the SUnit.
void ScheduleDAGSDNodes::RegDefIter::Advance() {
  for (;Node;) { // Visit all glued nodes.
    for (;DefIdx < NodeNumDefs; ++DefIdx) {
      if (!Node->hasAnyUseOfValue(DefIdx))
        continue;
      ValueType = Node->getSimpleValueType(DefIdx);
      ++DefIdx;
      return; // Found a normal regdef.
    }
    Node = Node->getGluedNode();
    if (!Node) {
      return; // No values left to visit.
    }
    InitNodeNumDefs();
  }
}

void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
  assert(SU->NumRegDefsLeft == 0 && "expect a new node");
  for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
    assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
    ++SU->NumRegDefsLeft;
  }
}

void ScheduleDAGSDNodes::computeLatency(SUnit *SU) {
  SDNode *N = SU->getNode();

  // TokenFactor operands are considered zero latency, and some schedulers
  // (e.g. Top-Down list) may rely on the fact that operand latency is nonzero
  // whenever node latency is nonzero.
  if (N && N->getOpcode() == ISD::TokenFactor) {
    SU->Latency = 0;
    return;
  }

  // Check to see if the scheduler cares about latencies.
  if (forceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  if (!InstrItins || InstrItins->isEmpty()) {
    if (N && N->isMachineOpcode() &&
        TII->isHighLatencyDef(N->getMachineOpcode()))
      SU->Latency = HighLatencyCycles;
    else
      SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes glued together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode())
      SU->Latency += TII->getInstrLatency(InstrItins, N);
}

void ScheduleDAGSDNodes::computeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const{
  // Check to see if the scheduler cares about latencies.
  if (forceUnitLatencies())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Use->isMachineOpcode())
    // Adjust the use operand index by num of defs.
    OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
  int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
  if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
      !BB->succ_empty()) {
    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      // This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so as not to penalize the def.
      // FIXME: need target specific adjustment here?
      Latency = (Latency > 1) ? Latency - 1 : 1;
  }
  if (Latency >= 0)
    dep.setLatency(Latency);
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> GluedNodes;
  for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
    GluedNodes.push_back(N);
  while (!GluedNodes.empty()) {
    dbgs() << "    ";
    GluedNodes.back()->dump(DAG);
    dbgs() << "\n";
    GluedNodes.pop_back();
  }
#endif
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGSDNodes::dumpSchedule() const {
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      SU->dump(this);
    else
      dbgs() << "**** NOOP ****\n";
  }
}
#endif

#ifndef NDEBUG
/// VerifyScheduledSequence - Verify that all SUnits were scheduled and that
/// their state is consistent with the nodes listed in Sequence.
///
void ScheduleDAGSDNodes::VerifyScheduledSequence(bool isBottomUp) {
  unsigned ScheduledNodes = ScheduleDAG::VerifyScheduledDAG(isBottomUp);
  unsigned Noops = 0;
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(Sequence.size() - Noops == ScheduledNodes &&
         "The number of nodes scheduled doesn't match the expected number!");
}
#endif // NDEBUG

/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
static void
ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
                   SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
                   DenseMap<SDValue, unsigned> &VRBaseMap, unsigned Order) {
  if (!N->getHasDebugValue())
    return;

  // Opportunistically insert immediate dbg_value uses, i.e. those with source
  // order number right after the N.
  MachineBasicBlock *BB = Emitter.getBlock();
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  ArrayRef<SDDbgValue*> DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (!Order || DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void
ProcessSourceNode(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter,
                  DenseMap<SDValue, unsigned> &VRBaseMap,
                  SmallVectorImpl<std::pair<unsigned, MachineInstr*> > &Orders,
                  SmallSet<unsigned, 8> &Seen) {
  unsigned Order = N->getIROrder();
  if (!Order || !Seen.insert(Order).second) {
    // Process any valid SDDbgValues even if node does not have any order
    // assigned.
    ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
    return;
  }

  MachineBasicBlock *BB = Emitter.getBlock();
  if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI() ||
      // Fast-isel may have inserted some instructions, in which case the
      // BB->back().isPHI() test will not fire when we want it to.
      std::prev(Emitter.getInsertPos())->isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)nullptr));
    return;
  }

  Orders.push_back(std::make_pair(Order, std::prev(Emitter.getInsertPos())));
  ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}

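/// EmitPhysRegCopy - Emit the register copy represented by a node-less SUnit:
/// either a copy of a scheduled value into the physical register required by
/// a successor, or a copy out of a physical register into a new virtual
/// register.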
void ScheduleDAGSDNodes::
EmitPhysRegCopy(SUnit *SU, DenseMap<SUnit*, unsigned> &VRBaseMap,
                MachineBasicBlock::iterator InsertPos) {
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue; // ignore chain preds
    if (I->getSUnit()->CopyDstRC) {
      // Copy to physical register.
      DenseMap<SUnit*, unsigned>::iterator VRI = VRBaseMap.find(I->getSUnit());
      assert(VRI != VRBaseMap.end() && "Node emitted out of order - late");
      // Find the destination physical register.
      unsigned Reg = 0;
      for (SUnit::const_succ_iterator II = SU->Succs.begin(),
             EE = SU->Succs.end(); II != EE; ++II) {
        if (II->isCtrl()) continue; // ignore chain succs
        if (II->getReg()) {
          Reg = II->getReg();
          break;
        }
      }
      BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), Reg)
        .addReg(VRI->second);
    } else {
      // Copy from physical register.
      assert(I->getReg() && "Unknown physical register!");
      unsigned VRBase = MRI.createVirtualRegister(SU->CopyDstRC);
      bool isNew = VRBaseMap.insert(std::make_pair(SU, VRBase)).second;
      (void)isNew; // Silence compiler warning.
      assert(isNew && "Node emitted out of order - early");
      BuildMI(*BB, InsertPos, DebugLoc(), TII->get(TargetOpcode::COPY), VRBase)
        .addReg(I->getReg());
    }
    break;
  }
}

/// EmitSchedule - Emit the machine code in scheduled order. Return the new
/// InsertPos and MachineBasicBlock that contains this insertion
/// point. ScheduleDAGSDNodes holds a BB pointer for convenience, but this does
/// not necessarily refer to returned BB. The emitter may split blocks.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(MachineBasicBlock::iterator &InsertPos) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
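  // Orders pairs each emitted instruction with the IR ordering number of its
  // source node, so dbg_value instructions can be interleaved in source order
  // below.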
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      TII->insertNoop(*Emitter.getBlock(), InsertPos);
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any glued SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap, InsertPos);
      continue;
    }

    SmallVector<SDNode *, 4> GluedNodes;
    for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
      GluedNodes.push_back(N);
    while (!GluedNodes.empty()) {
      SDNode *N = GluedNodes.back();
      Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      GluedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), less_first());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(Pos, DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    SmallVector<MachineInstr*, 8> DbgMIs;
    while (DI != DE) {
      if (!(*DI)->isInvalidated())
        if (MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap))
          DbgMIs.push_back(DbgMI);
      ++DI;
    }

    MachineBasicBlock *InsertBB = Emitter.getBlock();
    MachineBasicBlock::iterator Pos = InsertBB->getFirstTerminator();
    InsertBB->insert(Pos, DbgMIs.begin(), DbgMIs.end());
  }

  InsertPos = Emitter.getInsertPos();
  return Emitter.getBlock();
}

/// Return the basic block label.
std::string ScheduleDAGSDNodes::getDAGName() const {
  return "sunit-dag." + BB->getFullName();
}