//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <queue>

using namespace llvm;

static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt,
                                     bool IsPostRAFlag,
                                     LiveIntervals *lis)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis),
    IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) {
  assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
  DbgValues.clear();
  assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
         "Virtual registers must be removed prior to PostRA scheduling");

  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  SchedModel.init(*ST.getSchedModel(), &ST, TII);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
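
// Illustrative example (not part of the original source): the walk above lets
// callers look through IR such as
//   %i  = ptrtoint i8* %obj to i64
//   %i2 = add i64 %i, 16
//   %p2 = inttoptr i64 %i2 to i8*
// and recover %obj as the underlying object of %p2.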

/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
                                 SmallVectorImpl<Value *> &Objects) {
  SmallPtrSet<const Value *, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs);

    for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
         I != IE; ++I) {
      V = *I;
      if (!Visited.insert(V))
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
}

typedef SmallVector<PointerIntPair<const Value *, 1, bool>, 4>
UnderlyingObjectsVector;

/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
/// information and it can be tracked to normal references to known
/// objects, add those objects to Objects, each tagged with a may-alias flag.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                                         const MachineFrameInfo *MFI,
                                         UnderlyingObjectsVector &Objects) {
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs);

  for (SmallVectorImpl<Value *>::iterator I = Objs.begin(), IE = Objs.end();
       I != IE; ++I) {
    bool MayAlias = true;
    V = *I;

    if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
      // For now, ignore PseudoSourceValues which may alias LLVM IR values
      // because the code that uses this function has no way to cope with
      // such aliases.

      if (PSV->isAliased(MFI)) {
        Objects.clear();
        return;
      }

      MayAlias = PSV->mayAlias(MFI);
    } else if (!isIdentifiedObject(V)) {
      Objects.clear();
      return;
    }

    Objects.push_back(UnderlyingObjectsVector::value_type(V, MayAlias));
  }
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = 0;
}

/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
/// region.
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  NumRegionInstrs = regioninstrs;
}

/// Close the current scheduling region. Don't clear any state in case the
/// driver wants to refer to the previous scheduling region.
void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}
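
// A minimal driver sketch (illustrative only; the driver-side variable names
// are assumptions, not part of this file). A scheduler subclass such as
// ScheduleDAGMI is expected to bracket DAG construction with the block/region
// protocol above:
//   ScheduleDAGMI DAG(...);
//   DAG.startBlock(MBB);
//   DAG.enterRegion(MBB, RegionBegin, RegionEnd, NumRegionInstrs);
//   DAG.buildSchedGraph(AA); // typically called from the subclass's schedule()
//   // ... schedule the region ...
//   DAG.exitRegion();
//   DAG.finishBlock();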

/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      else {
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
        if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(&ExitSU, i);
      }
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    assert(Uses.empty() && "Uses in set before adding deps?");
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (!Uses.contains(Reg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      }
  }
}
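
// Illustrative example (not part of the original source): if a region ends in
// a conditional branch, every register live into a successor block becomes a
// use of ExitSU, so a long-latency def of such a register is scheduled early
// enough for its value to be ready at the block boundary.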

/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = 0;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set the hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      Dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
                                         UseOp));

      ST.adjustSchedDependency(SU, UseSU, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
  }
  else {
    addPhysRegDataDeps(SU, OperIdx);
    unsigned Reg = MO.getReg();

    // Clear this register's use list.
    if (Uses.contains(Reg))
      Uses.eraseAll(Reg);

    if (!MO.isDead()) {
      Defs.eraseAll(Reg);
    } else if (SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}
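
// Illustrative example (not part of the original source) of the edges added
// for a physical register, scheduling bottom-up:
//   I1: %EAX = ...        ; def
//   I2: ...  = use %EAX   ; use
//   I3: %EAX = ...        ; later def
// I2 gets a data edge on I1 (addPhysRegDataDeps), while I3 gets an anti edge
// on I2 and an output edge on I1 (both via the loop over Defs above).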

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Singly defined vregs do not have output/anti dependencies.
  // The current operand is a def, so we have at least one.
  // Check here if there are any others...
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI == VRegDefs.end())
    VRegDefs.insert(VReg2SUnit(Reg, SU));
  else {
    SUnit *DefSU = DefI->SU;
    if (DefSU != SU && DefSU != &ExitSU) {
      SDep Dep(SU, SDep::Output, Reg);
      Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
      DefSU->addPred(Dep);
    }
    DefI->SU = SU;
  }
}

/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Record this local VReg use.
  VReg2UseMap::iterator UI = VRegUses.find(Reg);
  for (; UI != VRegUses.end(); ++UI) {
    if (UI->SU == SU)
      break;
  }
  if (UI == VRegUses.end())
    VRegUses.insert(VReg2SUnit(Reg, SU));

  // Lookup this operand's reaching definition.
  assert(LIS && "vreg dependencies require LiveIntervals");
  LiveRangeQuery LRQ(LIS->getInterval(Reg), LIS->getInstructionIndex(MI));
  VNInfo *VNI = LRQ.valueIn();

  // VNI will be valid because MachineOperand::readsReg() is checked by caller.
  assert(VNI && "No value to read by operand");
  MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
  // Phis and other noninstructions (after coalescing) have a NULL Def.
  if (Def) {
    SUnit *DefSU = getSUnit(Def);
    if (DefSU) {
      // The reaching Def lives within this scheduling region.
      // Create a data dependence.
      SDep dep(DefSU, SDep::Data, Reg);
      // Adjust the dependence latency using operand def/use information, then
      // allow the target to perform its own adjustments.
      int DefOp = Def->findRegisterDefOperandIdx(Reg);
      dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx));

      const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
      ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
      SU->addPred(dep);
    }
  }

  // Add antidependence to the following def of the vreg it uses.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI != VRegDefs.end() && DefI->SU != SU)
    DefI->SU->addPred(SDep(SU, SDep::Anti, Reg));
}
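
// Illustrative example (not part of the original source): given
//   I1: %vreg1 = ...
//   I2: ...    = use %vreg1
//   I3: %vreg1 = ...      ; only possible when %vreg1 has multiple defs
// I2 gets a data edge on I1 above, while I3 gets an anti edge on I2 here and
// an output edge on I1 in addVRegDefDeps.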

/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
      (MI->hasOrderedMemoryRef() &&
       (!MI->mayLoad() || !MI->isInvariantLoad(AA))))
    return true;
  return false;
}

// This MI might have either incomplete info, or be known to be unsafe
// to deal with (i.e. volatile object).
static inline bool isUnsafeMemoryObject(MachineInstr *MI,
                                        const MachineFrameInfo *MFI) {
  if (!MI || MI->memoperands_empty())
    return true;
  // We purposefully do not check for hasOneMemOperand() here,
  // in the hope of triggering an assert downstream in order to
  // finish the implementation.
  if ((*MI->memoperands_begin())->isVolatile() ||
      MI->hasUnmodeledSideEffects())
    return true;
  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return true;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs);
  for (SmallVectorImpl<Value *>::iterator I = Objs.begin(),
       IE = Objs.end(); I != IE; ++I) {
    V = *I;

    if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
      // Similarly to getUnderlyingObjectsForInstr:
      // For now, ignore PseudoSourceValues which may alias LLVM IR values
      // because the code that uses this function has no way to cope with
      // such aliases.
      if (PSV->isAliased(MFI))
        return true;
    }

    // Does this pointer refer to a distinct and identifiable object?
    if (!isIdentifiedObject(V))
      return true;
  }

  return false;
}

/// This returns true if the two MIs need a chain edge between them.
/// If these are not even memory operations, we still may need
/// chain deps between them. The question really is: could
/// these two MIs be reordered during scheduling from a memory-dependency
/// point of view?
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                             MachineInstr *MIa,
                             MachineInstr *MIb) {
  // Cover a trivial case - no edge is needed from a node to itself.
  if (MIa == MIb)
    return false;

  if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI))
    return true;

  // If we are dealing with two "normal" loads, we do not need an edge
  // between them - they could be reordered.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // To this point analysis is generic. From here on we do need AA.
  if (!AA)
    return true;

  MachineMemOperand *MMOa = *MIa->memoperands_begin();
  MachineMemOperand *MMOb = *MIb->memoperands_begin();

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
    llvm_unreachable("Multiple memory operands.");

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offset with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user".
  // FIXME: Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  assert((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
  assert((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");

  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;
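
  // Worked example (illustrative, not part of the original source): if MMOa
  // is a 4-byte access at offset 8 and MMOb is a 16-byte access at offset 0,
  // then MinOffset = 0, Overlapa = 4 + 8 - 0 = 12, and Overlapb = 16 + 0 - 0
  // = 16, i.e. each query extent runs from MinOffset to the end of the
  // corresponding access.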

  AliasAnalysis::AliasResult AAResult = AA->alias(
      AliasAnalysis::Location(MMOa->getValue(), Overlapa,
                              MMOa->getTBAAInfo()),
      AliasAnalysis::Location(MMOb->getValue(), Overlapb,
                              MMOb->getTBAAInfo()));

  return (AAResult != AliasAnalysis::NoAlias);
}

/// This recursive function iterates over chain deps of SUb looking for
/// the "latest" node that needs a chain edge to SUa.
static unsigned
iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                 SUnit *SUa, SUnit *SUb, SUnit *ExitSU, unsigned *Depth,
                 SmallPtrSet<const SUnit*, 16> &Visited) {
  if (!SUa || !SUb || SUb == ExitSU)
    return *Depth;

  // Remember visited nodes.
  if (!Visited.insert(SUb))
    return *Depth;
  // If there is _some_ dependency already in place, do not
  // descend any further.
  // TODO: Need to make sure that if that dependency got eliminated or ignored
  // for any reason in the future, we would not violate DAG topology.
  // Currently it does not happen, but makes an implicit assumption about
  // future implementation.
  //
  // Independently, if we encounter a node that is some sort of global
  // object (like a call) we already have a full set of dependencies to it
  // and we can stop descending.
  if (SUa->isSucc(SUb) ||
      isGlobalMemoryObject(AA, SUb->getInstr()))
    return *Depth;

  // If we do need an edge, or we have exceeded the depth budget,
  // add that edge to the predecessors chain of SUb,
  // and stop descending.
  if (*Depth > 200 ||
      MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
    SUb->addPred(SDep(SUa, SDep::MayAliasMem));
    return *Depth;
  }
  // Track current depth.
  (*Depth)++;
  // Iterate over chain dependencies only.
  for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
       I != E; ++I)
    if (I->isCtrl())
      iterateChainSucc(AA, MFI, SUa, I->getSUnit(), ExitSU, Depth, Visited);
  return *Depth;
}

/// This function assumes that "downward" from SU there exists a
/// tail/leaf of the already constructed DAG. It iterates downward and
/// checks whether SU can be aliasing any node dominated
/// by it.
static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                            SUnit *SU, SUnit *ExitSU, std::set<SUnit *> &CheckList,
                            unsigned LatencyToLoad) {
  if (!SU)
    return;

  SmallPtrSet<const SUnit*, 16> Visited;
  unsigned Depth = 0;

  for (std::set<SUnit *>::iterator I = CheckList.begin(), IE = CheckList.end();
       I != IE; ++I) {
    if (SU == *I)
      continue;
    if (MIsNeedChainEdge(AA, MFI, SU->getInstr(), (*I)->getInstr())) {
      SDep Dep(SU, SDep::MayAliasMem);
      Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
      (*I)->addPred(Dep);
    }
    // Now go through all the chain successors and iterate from them.
    // Keep track of visited nodes.
    for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
         JE = (*I)->Succs.end(); J != JE; ++J)
      if (J->isCtrl())
        iterateChainSucc(AA, MFI, SU, J->getSUnit(),
                         ExitSU, &Depth, Visited);
  }
}

/// Check whether two objects need a chain edge; if so, add it;
/// otherwise remember the rejected SU.
static inline
void addChainDependency(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                        SUnit *SUa, SUnit *SUb,
                        std::set<SUnit *> &RejectList,
                        unsigned TrueMemOrderLatency = 0,
                        bool isNormalMemory = false) {
  // If this is a false dependency,
  // do not add the edge, but remember the rejected node.
  if (!AA || MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
    SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
    Dep.setLatency(TrueMemOrderLatency);
    SUb->addPred(Dep);
  }
  else {
    // Duplicate entries should be ignored.
    RejectList.insert(SUb);
    DEBUG(dbgs() << "\tReject chain dep between SU("
          << SUa->NodeNum << ") and SU("
          << SUb->NodeNum << ")\n");
  }
}

/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(NumRegionInstrs);

  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = newSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
  }
}

/// If RPTracker is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker,
                                        PressureDiffs *PDiffs) {
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI
                                                       : ST.useAA();
  AliasAnalysis *AAForDep = UseAA ? AA : 0;

  MISUnitMap.clear();
  ScheduleDAG::clearDAG();

  // Create an SUnit for each real instruction.
  initSUnits();

  if (PDiffs)
    PDiffs->init(SUnits.size());

  // We build scheduling units by walking a block's instruction list from bottom
  // to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  MapVector<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  MapVector<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
  std::set<SUnit*> RejectMemNodes;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
  VRegUses.clear();
  VRegDefs.setUniverse(MRI.getNumVirtRegs());
  VRegUses.setUniverse(MRI.getNumVirtRegs());

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = NULL;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, MI));
      DbgMI = NULL;
    }

    if (MI->isDebugValue()) {
      DbgMI = MI;
      continue;
    }
    SUnit *SU = MISUnitMap[MI];
    assert(SU && "No SUnit mapped to this MI");

    if (RPTracker) {
      PressureDiff *PDiff = PDiffs ? &(*PDiffs)[SU->NodeNum] : 0;
      RPTracker->recede(/*LiveUses=*/0, PDiff);
      assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI");
    }

    assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) &&
           "Cannot schedule terminators or labels!");

    // Add register-based dependencies (data, anti, and output).
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef()) {
          HasVRegDef = true;
          addVRegDefDeps(SU, j);
        }
        else if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(SU, j);
      }
    }
    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is required
    // for vreg defs with no in-region use, and prefetches with no vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1
        && (HasVRegDef || MI->mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass)
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
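    // For example (illustrative, not part of the original source): a store
    // followed by a possibly-aliased load gets a chain edge with latency 1
    // (TrueMemOrderLatency below), modeling a store-to-load forwarding cost,
    // while all other memory-order edges get latency 0.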
    unsigned TrueMemOrderLatency = MI->mayStore() ? 1 : 0;
    if (isGlobalMemoryObject(AA, MI)) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (MapVector<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Barrier));
      }
      for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
          SDep Dep(SU, SDep::Barrier);
          Dep.setLatency(TrueMemOrderLatency);
          I->second[i]->addPred(Dep);
        }
      }
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));
      BarrierChain = SU;
      // This is a barrier event that acts as a pivotal node in the DAG,
      // so it is safe to clear the list of exposed nodes.
      adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      RejectMemNodes.clear();
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain) {
        unsigned ChainLatency = 0;
        if (AliasChain->getInstr()->mayLoad())
          ChainLatency = TrueMemOrderLatency;
        addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes,
                           ChainLatency);
      }
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes,
                           TrueMemOrderLatency);
      for (MapVector<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I)
        addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes);
      for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes,
                             TrueMemOrderLatency);
      }
      adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      UnderlyingObjectsVector Objs;
      getUnderlyingObjectsForInstr(MI, MFI, Objs);

      if (Objs.empty()) {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      bool MayAlias = false;
      for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end();
           K != KE; ++K) {
        const Value *V = K->getPointer();
        bool ThisMayAlias = K->getInt();
        if (ThisMayAlias)
          MayAlias = true;

        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        MapVector<const Value *, SUnit *>::iterator I =
          ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        MapVector<const Value *, SUnit *>::iterator IE =
          ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes,
                             0, true);
          I->second = SU;
        } else {
          if (ThisMayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        MapVector<const Value *, std::vector<SUnit *> >::iterator J =
          ((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        MapVector<const Value *, std::vector<SUnit *> >::iterator JE =
          ((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            addChainDependency(AAForDep, MFI, SU, J->second[i], RejectMemNodes,
                               TrueMemOrderLatency, true);
          J->second.clear();
        }
      }
      if (MayAlias) {
        // Add dependencies from all the PendingLoads, i.e. loads
        // with no underlying object.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes,
                             TrueMemOrderLatency);
        // Add dependence on alias chain, if needed.
        if (AliasChain)
          addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes);
        // But we also should check dependent instructions for the
        // SU in question.
        adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
                        TrueMemOrderLatency);
      }
      // Add dependence on barrier chain, if needed.
      // There is no point in checking aliasing on a barrier event. Even if
      // SU and the barrier _could_ be reordered, they should not. In addition,
      // we have lost all RejectMemNodes below the barrier.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Artificial));
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        UnderlyingObjectsVector Objs;
        getUnderlyingObjectsForInstr(MI, MFI, Objs);

        if (Objs.empty()) {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (MapVector<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes);

          PendingLoads.push_back(SU);
          MayAlias = true;
        } else {
          MayAlias = false;
        }

        for (UnderlyingObjectsVector::iterator
             J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
          const Value *V = J->getPointer();
          bool ThisMayAlias = J->getInt();

          if (ThisMayAlias)
            MayAlias = true;

          // A load from a specific PseudoSourceValue. Add precise dependencies.
          MapVector<const Value *, SUnit *>::iterator I =
            ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          MapVector<const Value *, SUnit *>::iterator IE =
            ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes,
                               0, true);
          if (ThisMayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        }
        if (MayAlias)
          adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, /*Latency=*/0);
        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes);
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Barrier));
      }
    }
  }
  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  VRegDefs.clear();
  PendingLoads.clear();
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  SU->getInstr()->dump();
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, &TM, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// may contain multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

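// A minimal usage sketch (illustrative only; see ScheduleDFS.h for the actual
// declarations). A scheduler that owns a SchedDFSResult might do roughly:
//
//   SchedDFSResult DFSResult(/*IsBU=*/true, SubtreeLimit);
//   DFSResult.resize(SUnits.size());
//   DFSResult.compute(SUnits);
//   ILPValue ILP = DFSResult.getILP(&SUnits[0]);
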
namespace llvm {
/// \brief Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;  // Parent node (member of the parent subtree).
    unsigned SubInstrCount; // Instr count in this tree only, not children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
                           SubInstrCount(0) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

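  /// One RootData entry per current subtree root, keyed by NodeID.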
  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Return true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the node's SubtreeID is assigned its own node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }

  /// Initialize this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
      SU->getInstr()->isTransient() ? 0 : 1;
  }

  /// Called once for each node after all predecessors are visited. Revisit
  /// this node's predecessors and potentially join them now that we know the
  /// ILP of the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot
    // be joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->getKind() != SDep::Data)
        continue;
      unsigned PredNum = PI->getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(*PI, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      }
      else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }

  /// Called once for each tree edge after calling visitPostorderNode on the
  /// predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }
1125
Andrew Trickbfb82232013-01-25 06:02:44 +00001126 /// Add a connection for cross edges.
1127 void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
Andrew Trick8b1496c2012-11-28 05:13:28 +00001128 ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
1129 }

  /// Set each node's subtree ID to the representative ID and record
  /// connections between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (SparseSet<RootData>::const_iterator
           RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) {
      unsigned TreeID = SubtreeClasses[RI->NodeID];
      if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
            << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
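    // For each cross edge between distinct subtrees, record a two-way
    // connection at the depth of the edge's predecessor.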
    for (std::vector<std::pair<const SUnit*, const SUnit*> >::const_iterator
           I = ConnectionPairs.begin(), E = ConnectionPairs.end();
         I != E; ++I) {
      unsigned PredTree = SubtreeClasses[I->first->NodeNum];
      unsigned SuccTree = SubtreeClasses[I->second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = I->first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }

protected:
  /// Join the predecessor subtree with the successor that is its DFS
  /// parent. Apply some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // pinch point.
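    // Absorbing a pinch point into one consumer's subtree would tie all of
    // its consumers to that tree, so keep it as a separate root instead.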
    unsigned NumDataSucs = 0;
    for (SUnit::const_succ_iterator SI = PredSU->Succs.begin(),
           SE = PredSU->Succs.end(); SI != SE; ++SI) {
      if (SI->getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
        R.SubtreeConnections[FromTree];
      for (SmallVectorImpl<SchedDFSResult::Connection>::iterator
             I = Connections.begin(), E = Connections.end(); I != E; ++I) {
        if (I->TreeID == ToTree) {
          I->Level = std::max(I->Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};
} // namespace llvm

namespace {
/// \brief Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

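  /// Pop the current node; return the edge by which it was reached from its
  /// DFS parent, or null if it was the root of the traversal.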
  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? 0 : llvm::prior(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};
} // anonymous

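/// Return true if SU has a data successor within the DAG (edges to boundary
/// nodes such as ExitSU are ignored); nodes without one serve as the roots of
/// the bottom-up DFS in compute().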
static bool hasDataSucc(const SUnit *SU) {
  for (SUnit::const_succ_iterator
         SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) {
    if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

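  // Walk bottom-up from each root (a node with no data successors), visiting
  // every node exactly once across all traversals.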
  SchedDFSImpl Impl(*this);
  for (ArrayRef<SUnit>::const_iterator
         SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) {
    const SUnit *SU = &*SI;
    if (Impl.isVisited(SU) || hasDataSucc(SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(SU);
    DFS.follow(SU);
    for (;;) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An edge to an already visited node is a cross edge, assuming an
        // acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (SmallVectorImpl<Connection>::const_iterator
         I = SubtreeConnections[SubtreeID].begin(),
         E = SubtreeConnections[SubtreeID].end(); I != E; ++I) {
    SubtreeConnectLevels[I->TreeID] =
      std::max(SubtreeConnectLevels[I->TreeID], I->Level);
    DEBUG(dbgs() << "  Tree: " << I->TreeID
          << " @" << SubtreeConnectLevels[I->TreeID] << '\n');
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}
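
// For example, InstrCount = 8 over Length = 4 prints "8 / 4 = 2"; a higher
// ratio indicates more instruction-level parallelism below the node.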

void ILPValue::dump() const {
  dbgs() << *this << '\n';
}

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // namespace llvm
#endif // !NDEBUG || LLVM_ENABLE_DUMP