//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, which implements
// scheduling for an SDNode-based dependence graph.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

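// Hidden flag for the experimental load clustering below: when set, nearby
// loads are glued together so they are scheduled adjacently and in order.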
static cl::opt<bool>
ClusterLoads("cluster-loads", cl::Hidden,
             cl::desc("Schedule nearby loads together and in order"));

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

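/// Clone - Duplicate an SUnit for a node the scheduler has cloned, copying
/// the scheduling-relevant flags from the original unit.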
SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

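/// AddFlags - Morph node N in place so that it carries an MVT::Flag result
/// (when AddFlag is true) and an incoming flag operand (when Flag holds a
/// node), allowing it to be glued to neighboring nodes.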
static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));
  if (AddFlag)
    VTs.push_back(MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));
  if (Flag.getNode())
    Ops.push_back(Flag);
  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
}

/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (!TID.mayLoad())
      continue;

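    // The incoming chain, if present, is expected as the node's last operand;
    // loads with no chain are not candidates for clustering.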
    SDNode *Chain = 0;
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
      Chain = Node->getOperand(NumOps-1).getNode();
    if (!Chain)
      continue;

    // Look for other loads of the same chain. Find loads that are loading from
    // the same base pointer and different offsets.
    Visited.clear();
    Offsets.clear();
    O2SMap.clear();
    bool Cluster = false;
    SDNode *Base = Node;
    int64_t BaseOffset;
    for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
         I != E; ++I) {
      SDNode *User = *I;
      if (User == Node || !Visited.insert(User))
        continue;
      int64_t Offset1, Offset2;
      if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
          Offset1 == Offset2)
        // FIXME: Should be ok if their addresses are identical. But earlier
        // optimizations really should have eliminated one of the loads.
        continue;
      if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
        Offsets.push_back(Offset1);
      O2SMap.insert(std::make_pair(Offset2, User));
      Offsets.push_back(Offset2);
      if (Offset2 < Offset1) {
        Base = User;
        BaseOffset = Offset2;
      } else {
        BaseOffset = Offset1;
      }
      Cluster = true;
    }

    if (!Cluster)
      continue;

    // Sort them in increasing order.
    std::sort(Offsets.begin(), Offsets.end());

    // Check if the loads are close enough.
    SmallVector<SDNode*, 4> Loads;
    unsigned NumLoads = 0;
    int64_t BaseOff = Offsets[0];
    SDNode *BaseLoad = O2SMap[BaseOff];
    Loads.push_back(BaseLoad);
    for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
      int64_t Offset = Offsets[i];
      SDNode *Load = O2SMap[Offset];
      if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                        NumLoads))
        break; // Stop right here. Ignore loads that are further away.
      Loads.push_back(Load);
      ++NumLoads;
    }

    if (NumLoads == 0)
      continue;

    // Cluster loads by adding MVT::Flag outputs and inputs. This also
    // ensures they are scheduled in order of increasing addresses.
    SDNode *Lead = Loads[0];
    AddFlags(Lead, SDValue(0,0), true, DAG);
    SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
    for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
      bool OutFlag = i < e-1;
      SDNode *Load = Loads[i];
      AddFlags(Load, InFlag, OutFlag, DAG);
      if (OutFlag)
        InFlag = SDValue(Load, Load->getNumValues()-1);
      ++LoadsClustered;
    }
  }
}

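/// BuildSchedUnits - Create an SUnit for each non-passive node in the DAG;
/// nodes connected by MVT::Flag edges are folded into a single SUnit.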
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't
  // get invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add it to the group of
    // flagged nodes. Nodes can have at most one flag input and one flag
    // output. Flags are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided information.
    if (UnitLatencies)
      NodeSUnit->Latency = 1;
    else
      ComputeLatency(NodeSUnit);
  }
}

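/// AddSchedEdges - Add dependence edges between SUnits: a data or chain edge
/// for every scheduled operand of every node in a unit, with latencies
/// adjusted by the target where appropriate.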
void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross-class copy (cost < 0). That
        // means we are only treating an "expensive to copy" register
        // dependency as a physical register dependency. This may change in
        // the future though.
        if (Cost >= 0)
          PhysReg = 0;

        const SDep& dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpSU->Latency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpSU, SU, (SDep &)dep);
          ST.adjustSchedDependency(OpSU, SU, (SDep &)dep);
        }

        SU->addPred(dep);
      }
    }
  }
}

/// BuildSchedGraph - Build the SUnit graph from the selection dag that we are
/// given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// flagged-together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster loads from "near" addresses into combined SUnits.
  if (ClusterLoads)
    ClusterNeighboringLoads();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  InstrEmitter Emitter(BB, InsertPos);
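  // VRBaseMap records the virtual register assigned to each emitted SDValue;
  // CopyVRBaseMap does the same for SUnits that are pure physreg copies.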
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap, EM);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap, EM);
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}