//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, which is the base class used
// by the SelectionDAG-based scheduling implementations.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

/// NewSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = 0;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == 0 || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (N->isMachineOpcode() &&
      N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF)
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

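/// AddFlags - Rebuild node N so it can participate in a flag (glue) chain:
/// copy its existing result types, optionally append an MVT::Flag result,
/// append the incoming Flag value (if any) to its operand list, and morph the
/// node in place while preserving any memory operands it carries.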
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;

  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));

  if (AddFlag)
    VTs.push_back(MVT::Flag);

  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));

  if (Flag.getNode())
    Ops.push_back(Flag);

  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  MachineSDNode::mmo_iterator Begin = 0, End = 0;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());

  // Reset the memory references.
  if (MN)
    MN->setMemRefs(Begin, End);
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
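/// For example, if the target reports two loads from the same base pointer at
/// offsets 0 and 8 as close enough, they are chained head to tail with
/// MVT::Flag values so the scheduler keeps them adjacent and emits them in
/// increasing-address order.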
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = 0;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  int64_t BaseOffset;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if their addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1) {
      Base = User;
      BaseOffset = Offset2;
    } else {
      BaseOffset = Offset1;
    }
    Cluster = true;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                      NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Flag outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  AddFlags(Lead, SDValue(0, 0), true, DAG);

  SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
  for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
    bool OutFlag = i < e-1;
    SDNode *Load = Loads[i];
    AddFlags(Load, InFlag, OutFlag, DAG);

    if (OutFlag)
      InFlag = SDValue(Load, Load->getNumValues()-1);

    ++LoadsClustered;
  }
}

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (TID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't
  // get invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add those nodes to the
    // same SUnit. Nodes can have at most one flag input and one flag output.
    // Flags are required to be the last operand and result of a node.
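    // (The two scans below walk the flagged chain in both directions so the
    // entire chain ends up in this single SUnit.)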

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }
}

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross-class copy (cost < 0). That
        // means we are only treating an "expensive to copy" register
        // dependency as a physical register dependency. This may change in
        // the future, though.
        if (Cost >= 0)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpLatency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection DAG that we
/// are given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// nodes that are flagged together with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty()) {
    SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const{
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies())
    return;

  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  if (dep.getKind() != SDep::Data)
    return;

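  // From here on, derive the data-dependence latency from the itinerary: the
  // cycle at which Def produces the result operand minus the cycle at which
  // Use reads it, plus one.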
  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (DefIdx >= II.getNumDefs())
      return;
    int DefCycle = InstrItins.getOperandCycle(II.getSchedClass(), DefIdx);
    if (DefCycle < 0)
      return;
    int UseCycle = 1;
    if (Use->isMachineOpcode()) {
      const unsigned UseClass = TII->get(Use->getMachineOpcode()).getSchedClass();
      UseCycle = InstrItins.getOperandCycle(UseClass, OpIdx);
    }
    if (UseCycle >= 0) {
      int Latency = DefCycle - UseCycle + 1;
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

namespace {
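  // Comparator used to sort (source order, MachineInstr*) pairs by ascending
  // source order number.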
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                              InstrEmitter &Emitter,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                    SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                              SmallSet<unsigned, 8> &Seen) {
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order))
    return;

  MachineBasicBlock *BB = Emitter.getBlock();
  if (BB->empty() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, &BB->back()));
  if (!N->getHasDebugValue())
    return;
  // Opportunistically insert immediate dbg_value uses, i.e. those with source
  // order numbers right after N.
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}


/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->push_back(DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      SDNode *N = FlaggedNodes.back();
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->empty() ? BB->end() : BB->begin();
    while (BBBegin != BB->end() && BBBegin->isPHI())
      ++BBBegin;

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    MachineInstr *LastMI = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      MachineBasicBlock *MIBB = MI->getParent();
#ifndef NDEBUG
      unsigned LastDIOrder = 0;
#endif
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
#ifndef NDEBUG
        assert((*DI)->getOrder() >= LastDIOrder &&
               "SDDbgValue nodes must be in source order!");
        LastDIOrder = (*DI)->getOrder();
#endif
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            MachineBasicBlock::iterator Pos = MI;
            MIBB->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
      LastMI = MI;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}