//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAG class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
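  // Only a value feeding operand 2 (the source value) of a CopyToReg node can
  // give rise to a physical register dependency here.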
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

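/// AddFlags - Rebuild node N in place via MorphNodeTo, optionally appending an
/// MVT::Flag result (if AddFlag is true) and/or an incoming flag operand (if
/// Flag is a valid SDValue).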
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));
  if (AddFlag)
    VTs.push_back(MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));
  if (Flag.getNode())
    Ops.push_back(Flag);
  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (!TID.mayLoad())
      continue;

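    // A machine node's chain operand, if it has one, is always its last
    // operand.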
    SDNode *Chain = 0;
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
      Chain = Node->getOperand(NumOps-1).getNode();
    if (!Chain)
      continue;

    // Look for other loads of the same chain. Find loads that are loading from
    // the same base pointer and different offsets.
    Visited.clear();
    Offsets.clear();
    O2SMap.clear();
    bool Cluster = false;
    SDNode *Base = Node;
    int64_t BaseOffset;
    for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
         I != E; ++I) {
      SDNode *User = *I;
      if (User == Node || !Visited.insert(User))
        continue;
      int64_t Offset1, Offset2;
      if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
          Offset1 == Offset2)
        // FIXME: Should be ok if their addresses are identical. But earlier
        // optimizations really should have eliminated one of the loads.
        continue;
      if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
        Offsets.push_back(Offset1);
      O2SMap.insert(std::make_pair(Offset2, User));
      Offsets.push_back(Offset2);
      if (Offset2 < Offset1) {
        Base = User;
        BaseOffset = Offset2;
      } else {
        BaseOffset = Offset1;
      }
      Cluster = true;
    }

    if (!Cluster)
      continue;

    // Sort them in increasing order.
    std::sort(Offsets.begin(), Offsets.end());

    // Check if the loads are close enough.
    SmallVector<SDNode*, 4> Loads;
    unsigned NumLoads = 0;
    int64_t BaseOff = Offsets[0];
    SDNode *BaseLoad = O2SMap[BaseOff];
    Loads.push_back(BaseLoad);
    for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
      int64_t Offset = Offsets[i];
      SDNode *Load = O2SMap[Offset];
      if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                        NumLoads))
        break; // Stop right here. Ignore loads that are further away.
      Loads.push_back(Load);
      ++NumLoads;
    }

    if (NumLoads == 0)
      continue;

    // Cluster loads by adding MVT::Flag outputs and inputs. This also
    // ensures they are scheduled in order of increasing addresses.
    SDNode *Lead = Loads[0];
    AddFlags(Lead, SDValue(0,0), true, DAG);
    SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
    for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
      bool OutFlag = i < e-1;
      SDNode *Load = Loads[i];
      AddFlags(Load, InFlag, OutFlag, DAG);
      if (OutFlag)
        InFlag = SDValue(Load, Load->getNumValues()-1);
      ++LoadsClustered;
    }
  }
}

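/// BuildSchedUnits - Create an SUnit for each schedulable SDNode in the DAG,
/// grouping nodes that are connected by MVT::Flag values into a single SUnit.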
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't get
  // invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add it to the flagged
    // nodes. Nodes can have at most one flag input and one flag output. Flags
    // are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    if (UnitLatencies)
      NodeSUnit->Latency = 1;
    else
      ComputeLatency(NodeSUnit);
  }
}

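/// AddSchedEdges - Add dependence edges between the SUnits created by
/// BuildSchedUnits, based on the operands of each node in each group.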
void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

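      // Add an edge for each operand of N: data edges for value operands,
      // order edges for chain operands.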
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
        // emits a copy from the physical register to a virtual register unless
        // it requires a cross class copy (cost < 0). That means we are only
        // treating "expensive to copy" register dependency as physical register
        // dependency. This may change in the future though.
        if (Cost >= 0)
          PhysReg = 0;

        const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpSU->Latency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpSU, SU, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection dag that is our
/// input.  This SUnit graph is similar to the SelectionDAG, but excludes nodes
/// that aren't interesting to scheduling, and represents flagged together
/// nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster loads from "near" addresses into combined SUnits.
  ClusterNeighboringLoads();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.  We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

namespace {
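  // OrderSorter - Comparison functor that orders (source order, MachineInstr*)
  // pairs by increasing source order number.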
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                              InstrEmitter &Emitter,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                              SmallSet<unsigned, 8> &Seen) {
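  // Ignore nodes that have no source order number, and process each order
  // number only once.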
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order))
    return;

  MachineBasicBlock *BB = Emitter.getBlock();
  if (BB->empty() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, &BB->back()));
  if (!N->getHasDebugValue())
    return;
  // Opportunistically insert immediate dbg_value uses, i.e. those with source
  // order number right after the N.
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}


/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(BB->end(), DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      SDNode *N = FlaggedNodes.back();
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap, EM);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap, EM);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->empty() ? BB->end() : BB->begin();
    while (BBBegin != BB->end() && BBBegin->isPHI())
      ++BBBegin;

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    MachineInstr *LastMI = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      MachineBasicBlock *MIBB = MI->getParent();
#ifndef NDEBUG
      unsigned LastDIOrder = 0;
#endif
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
#ifndef NDEBUG
        assert((*DI)->getOrder() >= LastDIOrder &&
               "SDDbgValue nodes must be in source order!");
        LastDIOrder = (*DI)->getOrder();
#endif
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            MachineBasicBlock::iterator Pos = MI;
            MIBB->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
      LastMI = MI;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}