//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, a ScheduleDAG subclass used by
// the SelectionDAG-based instruction schedulers.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf),
    InstrItins(mf.getTarget().getInstrItineraryData()) {}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

/// NewSUnit - Creates a new SUnit and returns a pointer to it.
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = 0;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == 0 || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (!N ||
      (N->isMachineOpcode() &&
       N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isCall = Old->isCall;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
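/// Only one pattern is recognized here: a CopyToReg user whose copied value
/// (operand 2) is produced as an implicit physical-register def of a machine
/// node. The cost reported is the copy cost of that register's class.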
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  SDNode *FlagDestNode = Flag.getNode();

  // Don't add a flag from a node to itself.
  if (FlagDestNode == N) return;

  // Don't add a flag to something which already has a flag.
  if (N->getValueType(N->getNumValues() - 1) == MVT::Flag) return;

  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    VTs.push_back(N->getValueType(I));

  if (AddFlag)
    VTs.push_back(MVT::Flag);

  SmallVector<SDValue, 4> Ops;
  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
    Ops.push_back(N->getOperand(I));

  if (FlagDestNode)
    Ops.push_back(Flag);

  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  MachineSDNode::mmo_iterator Begin = 0, End = 0;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());

  // Reset the memory references.
  if (MN)
    MN->setMemRefs(Begin, End);
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
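/// For example, loads from [base], [base+4], and [base+8] that share one chain
/// may be morphed into a flagged sequence so the scheduler emits them back to
/// back in order of increasing offset.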
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = 0;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                      NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Flag outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
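  // The result is a chain Lead --flag--> Loads[1] --flag--> ... --flag--> last
  // load, with each load consuming the flag produced by the one before it.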
  SDNode *Lead = Loads[0];
  AddFlags(Lead, SDValue(0, 0), true, DAG);

  SDValue InFlag = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutFlag = I < E - 1;
    SDNode *Load = Loads[I];

    AddFlags(Load, InFlag, OutFlag, DAG);

    if (OutFlag)
      InFlag = SDValue(Load, Load->getNumValues() - 1);

    ++LoadsClustered;
  }
}

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (TID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't
  // get invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add them to the flagged
    // nodes. Nodes can have at most one flag input and one flag output. Flags
    // are required to be the last operand and result of a node.
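    // For example, a flagged chain A --flag--> B --flag--> C becomes a single
    // SUnit whose representative node is the bottom-most node (C here).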
Dan Gohman343f0c02008-11-19 23:18:57 +0000296
297 // Scan up to find flagged preds.
298 SDNode *N = NI;
Dan Gohmandb95fa12009-03-20 20:42:23 +0000299 while (N->getNumOperands() &&
Owen Anderson825b72b2009-08-11 20:47:22 +0000300 N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
Dan Gohmandb95fa12009-03-20 20:42:23 +0000301 N = N->getOperand(N->getNumOperands()-1).getNode();
302 assert(N->getNodeId() == -1 && "Node already inserted!");
303 N->setNodeId(NodeSUnit->NodeNum);
Evan Cheng8239daf2010-11-03 00:45:17 +0000304 if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
305 NodeSUnit->isCall = true;
Dan Gohman343f0c02008-11-19 23:18:57 +0000306 }
307
308 // Scan down to find any flagged succs.
309 N = NI;
Owen Anderson825b72b2009-08-11 20:47:22 +0000310 while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
Dan Gohman343f0c02008-11-19 23:18:57 +0000311 SDValue FlagVal(N, N->getNumValues()-1);
312
313 // There are either zero or one users of the Flag result.
314 bool HasFlagUse = false;
315 for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
316 UI != E; ++UI)
317 if (FlagVal.isOperandOf(*UI)) {
318 HasFlagUse = true;
319 assert(N->getNodeId() == -1 && "Node already inserted!");
320 N->setNodeId(NodeSUnit->NodeNum);
321 N = *UI;
Evan Cheng8239daf2010-11-03 00:45:17 +0000322 if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
323 NodeSUnit->isCall = true;
Dan Gohman343f0c02008-11-19 23:18:57 +0000324 break;
325 }
326 if (!HasFlagUse) break;
327 }
328
329 // If there are flag operands involved, N is now the bottom-most node
330 // of the sequence of nodes that are flagged together.
331 // Update the SUnit.
332 NodeSUnit->setNode(N);
333 assert(N->getNodeId() == -1 && "Node already inserted!");
334 N->setNodeId(NodeSUnit->NodeNum);
335
Dan Gohman787782f2008-11-21 01:44:51 +0000336 // Assign the Latency field of NodeSUnit using target-provided information.
Evan Chenge1631682010-05-19 22:42:23 +0000337 ComputeLatency(NodeSUnit);
Dan Gohman343f0c02008-11-19 23:18:57 +0000338 }
Dan Gohmanc9a5b9e2008-12-23 18:36:58 +0000339}
340
void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross class copy (cost < 0). That
        // means we are only treating an "expensive to copy" register
        // dependency as a physical register dependency. This may change in
        // the future though.
        if (Cost >= 0)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpLatency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection dag that we
/// are given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged-together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
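  // Non-machine nodes in the group (e.g. a CopyToReg that was glued into it)
  // contribute no latency of their own.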
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode())
      SU->Latency += TII->getInstrLatency(InstrItins, N);
}

void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const{
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Use->isMachineOpcode())
    // Adjust the use operand index by num of defs.
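    // (The instruction's operand indices count its defs first, so the SDNode
    // operand index is shifted to line up with them.)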
    OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
  int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
  if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
      !BB->succ_empty()) {
    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      // This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so as not to penalize the def.
      // FIXME: need target specific adjustment here?
      Latency = (Latency > 1) ? Latency - 1 : 1;
  }
  if (Latency >= 0)
    dep.setLatency(Latency);
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

namespace {
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
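// A null MachineInstr in the Orders vector records a source order for which
// no instruction was emitted.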
508static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
509 InstrEmitter &Emitter,
Evan Chengbfcb3052010-03-25 01:38:16 +0000510 DenseMap<SDValue, unsigned> &VRBaseMap,
511 SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
512 SmallSet<unsigned, 8> &Seen) {
513 unsigned Order = DAG->GetOrdering(N);
514 if (!Order || !Seen.insert(Order))
515 return;
516
517 MachineBasicBlock *BB = Emitter.getBlock();
Dan Gohman84023e02010-07-10 09:00:22 +0000518 if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
Evan Chengbfcb3052010-03-25 01:38:16 +0000519 // Did not insert any instruction.
520 Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
521 return;
522 }
523
Dan Gohman84023e02010-07-10 09:00:22 +0000524 Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
Evan Chengbfcb3052010-03-25 01:38:16 +0000525 if (!N->getHasDebugValue())
526 return;
527 // Opportunistically insert immediate dbg_value uses, i.e. those with source
528 // order number right after the N.
529 MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
530 SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
531 for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
532 if (DVs[i]->isInvalidated())
533 continue;
534 unsigned DVOrder = DVs[i]->getOrder();
535 if (DVOrder == ++Order) {
Dan Gohman891ff8f2010-04-30 19:35:33 +0000536 MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
Evan Cheng962021b2010-04-26 07:38:55 +0000537 if (DbgMI) {
538 Orders.push_back(std::make_pair(DVOrder, DbgMI));
539 BB->insert(InsertPos, DbgMI);
540 }
Evan Chengbfcb3052010-03-25 01:38:16 +0000541 DVs[i]->setIsInvalidated();
542 }
543 }
544}
545
546
/// EmitSchedule - Emit the machine code in scheduled order.
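/// Emission order is: byval parameter dbg_values (first basic block only),
/// then the scheduled SUnits in Sequence (each SUnit's flagged nodes before
/// its main node), then any remaining dbg_values interleaved by source order.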
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      SDNode *N = FlaggedNodes.back();
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
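  // Each dbg_value is placed next to the emitted instruction whose source
  // order follows its own; anything still left after that goes before the
  // block's terminator.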
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
#ifndef NDEBUG
      unsigned LastDIOrder = 0;
#endif
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
#ifndef NDEBUG
        assert((*DI)->getOrder() >= LastDIOrder &&
               "SDDbgValue nodes must be in source order!");
        LastDIOrder = (*DI)->getOrder();
#endif
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos =
        Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}