//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

/// NewSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
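  // Capture the current address of the SUnits storage so the assert below can
  // verify that push_back did not reallocate the vector; SUnit pointers must
  // remain stable.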
#ifndef NDEBUG
  const SUnit *Addr = 0;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == 0 || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

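/// Clone - Create a new SUnit that mirrors an existing one, copying its
/// scheduling-related flags and latency, and mark the original as cloned.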
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

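/// AddFlags - Morph node N in place so that it takes an optional incoming
/// flag operand and, if AddFlag is set, produces an additional MVT::Flag
/// result. This is the glue used below to keep clustered loads together.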
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));
  if (AddFlag)
    VTs.push_back(MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));
  if (Flag.getNode())
    Ops.push_back(Flag);
  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
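/// For example, two loads from [base] and [base+8] become flag-connected so
/// the scheduler emits them back to back in increasing address order.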
void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (!TID.mayLoad())
      continue;

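    // The chain operand, when present, is always the last operand of the
    // load node.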
    SDNode *Chain = 0;
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
      Chain = Node->getOperand(NumOps-1).getNode();
    if (!Chain)
      continue;

    // Look for other loads of the same chain. Find loads that are loading from
    // the same base pointer and different offsets.
    Visited.clear();
    Offsets.clear();
    O2SMap.clear();
    bool Cluster = false;
    SDNode *Base = Node;
    int64_t BaseOffset;
    for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
         I != E; ++I) {
      SDNode *User = *I;
      if (User == Node || !Visited.insert(User))
        continue;
      int64_t Offset1, Offset2;
      if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
          Offset1 == Offset2)
        // FIXME: Should be ok if their addresses are identical. But earlier
        // optimizations really should have eliminated one of the loads.
        continue;
      if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
        Offsets.push_back(Offset1);
      O2SMap.insert(std::make_pair(Offset2, User));
      Offsets.push_back(Offset2);
      if (Offset2 < Offset1) {
        Base = User;
        BaseOffset = Offset2;
      } else {
        BaseOffset = Offset1;
      }
      Cluster = true;
    }

    if (!Cluster)
      continue;

    // Sort them in increasing order.
    std::sort(Offsets.begin(), Offsets.end());

    // Check if the loads are close enough.
    SmallVector<SDNode*, 4> Loads;
    unsigned NumLoads = 0;
    int64_t BaseOff = Offsets[0];
    SDNode *BaseLoad = O2SMap[BaseOff];
    Loads.push_back(BaseLoad);
    for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
      int64_t Offset = Offsets[i];
      SDNode *Load = O2SMap[Offset];
      if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                        NumLoads))
        break; // Stop right here. Ignore loads that are further away.
      Loads.push_back(Load);
      ++NumLoads;
    }

    if (NumLoads == 0)
      continue;

    // Cluster loads by adding MVT::Flag outputs and inputs. This also
    // ensures they are scheduled in order of increasing addresses.
    SDNode *Lead = Loads[0];
    AddFlags(Lead, SDValue(0,0), true, DAG);
    SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
    for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
      bool OutFlag = i < e-1;
      SDNode *Load = Loads[i];
      AddFlags(Load, InFlag, OutFlag, DAG);
      if (OutFlag)
        InFlag = SDValue(Load, Load->getNumValues()-1);
      ++LoadsClustered;
    }
  }
}

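/// BuildSchedUnits - Create an SUnit for every schedulable node in the DAG,
/// grouping flag-connected nodes into a single SUnit.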
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node, if so, add them to flagged
    // nodes. Nodes can have at most one flag input and one flag output. Flags
    // are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }
}

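/// AddSchedEdges - Add data and chain dependence edges between the SUnits,
/// marking physical register defs/clobbers and computing operand latencies.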
void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
        // emits a copy from the physical register to a virtual register unless
        // it requires a cross class copy (cost < 0). That means we are only
        // treating "expensive to copy" register dependency as physical register
        // dependency. This may change in the future though.
        if (Cost >= 0)
          PhysReg = 0;

        const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpSU->Latency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection DAG that we are
/// given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster loads from "near" addresses into combined SUnits.
  ClusterNeighboringLoads();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

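/// ComputeLatency - Set SU->Latency to the sum of the stage latencies of all
/// nodes flagged into this SUnit, or to 1 when unit latencies are forced or
/// no itinerary data is available.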
void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty()) {
    SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

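/// ComputeOperandLatency - Refine the latency of a data dependence using the
/// per-operand read/write cycles from the instruction itineraries.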
void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies())
    return;

  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Def->isMachineOpcode() && Use->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (DefIdx >= II.getNumDefs())
      return;
    int DefCycle = InstrItins.getOperandCycle(II.getSchedClass(), DefIdx);
    if (DefCycle < 0)
      return;
    const unsigned UseClass = TII->get(Use->getMachineOpcode()).getSchedClass();
    int UseCycle = InstrItins.getOperandCycle(UseClass, OpIdx);
    if (UseCycle >= 0) {
      int Latency = DefCycle - UseCycle + 1;
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}

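/// dumpNode - Print the SUnit's main node and any flagged nodes, or note that
/// it is a physical register copy.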
void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

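// OrderSorter - Compare (source order, MachineInstr*) pairs by ascending
// source order number.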
namespace {
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                              InstrEmitter &Emitter,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                              SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                              SmallSet<unsigned, 8> &Seen) {
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order))
    return;

  MachineBasicBlock *BB = Emitter.getBlock();
  if (BB->empty() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, &BB->back()));
  if (!N->getHasDebugValue())
    return;
  // Opportunistically insert immediate dbg_value uses, i.e. those with a
  // source order number right after N.
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*, 2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}


/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(BB->end(), DbgMI);
    }
  }

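  // Emit machine instructions for each SUnit in the order chosen by the
  // scheduler.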
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      SDNode *N = FlaggedNodes.back();
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->empty() ? BB->end() : BB->begin();
    while (BBBegin != BB->end() && BBBegin->isPHI())
      ++BBBegin;

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    MachineInstr *LastMI = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      MachineBasicBlock *MIBB = MI->getParent();
#ifndef NDEBUG
      unsigned LastDIOrder = 0;
#endif
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
#ifndef NDEBUG
        assert((*DI)->getOrder() >= LastDIOrder &&
               "SDDbgValue nodes must be in source order!");
        LastDIOrder = (*DI)->getOrder();
#endif
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            MachineBasicBlock::iterator Pos = MI;
            MIBB->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
      LastMI = MI;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}