//===- MachineScheduler.cpp - Machine Instruction Scheduler --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}
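
// Illustrative usage (not part of this file): these cl::opts are read by any
// tool that runs this pass, e.g. llc. The input file name is hypothetical.
//
//   llc -misched-topdown foo.ll     # force top-down list scheduling
//   llc -misched-dcpl foo.ll        # print critical path lengths to stdout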

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}
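
// A minimal MachineSchedStrategy sketch, for orientation only (compare the
// -misched=shuffle strategy defined later in this file). "TrivialStrategy"
// and its LIFO queue are hypothetical names, not part of LLVM:
//
//   struct TrivialStrategy : MachineSchedStrategy {
//     std::vector<SUnit*> ReadyQ; // LIFO ready queue; top-down only.
//     void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       if (ReadyQ.empty()) return nullptr; // nullptr ends the driver loop.
//       IsTopNode = true;
//       SUnit *SU = ReadyQ.back(); ReadyQ.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//     void releaseBottomNode(SUnit *SU) override {}
//   };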

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);
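
// Sketch of plugging a custom scheduler into this registry (hypothetical
// names; the in-tree registrations later in this file follow the same
// pattern):
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, llvm::make_unique<TrivialStrategy>());
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Toy scheduler for illustration.",
//                   createMySched);
//
// After registration, -misched=my-sched selects the factory via
// MachineSchedOpt above.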

static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}
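
// The PassConfig hook queried above is the usual way for a target to
// substitute its own scheduler. A hedged sketch; "MyPassConfig" and
// "MyDAGMutation" are hypothetical names:
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     ScheduleDAGMILive *DAG = new ScheduleDAGMILive(
//         C, llvm::make_unique<GenericScheduler>(C));
//     DAG->addMutation(llvm::make_unique<MyDAGMutation>());
//     return DAG;
//   }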

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I) {
        MachineInstr &MI = *std::prev(I);
        if (isSchedBoundary(&MI, &*MBB, MF, TII))
          break;
        if (!MI.isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}
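
// Worked example for the region loop above (illustrative pseudo-MIR): given a
// block
//   %a = ...; %b = ...; CALL; %c = ...; JMP
// the bottom-up walk forms two regions, [%c, JMP) and [%a, CALL), because the
// call and, on typical targets, the terminator are scheduling boundaries. A
// boundary instruction ends the region below it but is never itself part of
// the DAG, so it cannot be reordered.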

void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}
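
// canAddEdge/addEdge are the usual entry points for DAG mutations that add
// artificial ordering edges. A hedged sketch ("MyClusterMutation" and the
// node choice are hypothetical; bounds checks omitted):
//
//   struct MyClusterMutation : ScheduleDAGMutation {
//     void apply(ScheduleDAGInstrs *DAGInstrs) override {
//       ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
//       SUnit *A = &DAG->SUnits[0], *B = &DAG->SUnits[1];
//       // Force A before B unless that would create a cycle.
//       if (DAG->canAddEdge(B, A))
//         DAG->addEdge(B, SDep(A, SDep::Artificial));
//     }
//   };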

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
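
// checkSchedLimit pairs with the -misched-cutoff flag for debugging: in a
// build with assertions enabled, shrinking the cutoff until a miscompile
// disappears bisects the failure to a single scheduling decision. A
// hypothetical invocation on a reduced test case:
//
//   llc -misched-cutoff=16 reduced.ll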

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    if (EntrySU.getInstr() != nullptr)
      EntrySU.dumpAll(this);
    for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
      SUnits[su].dumpAll(this);
    if (ExitSU.getInstr() != nullptr)
      ExitSU.dumpAll(this);
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
    unsigned BBNum = begin()->getParent()->getNumber();
    dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}
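
// Mutations run here in registration order, after the DAG is built but before
// the strategy is initialized. Registration typically happens when the DAG is
// created; a sketch, reusing the hypothetical mutation shown earlier:
//
//   DAG->addMutation(llvm::make_unique<MyClusterMutation>());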

void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

void ScheduleDAGMILive::collectVRegUses(SUnit &SU) {
  const MachineInstr &MI = *SU.getInstr();
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (!MO.readsReg())
      continue;
    if (TrackLaneMasks && !MO.isUse())
      continue;

    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // Ignore re-defs.
    if (TrackLaneMasks) {
      bool FoundDef = false;
      for (const MachineOperand &MO2 : MI.operands()) {
        if (MO2.isReg() && MO2.isDef() && MO2.getReg() == Reg && !MO2.isDead()) {
          FoundDef = true;
          break;
        }
      }
      if (FoundDef)
        continue;
    }

    // Record this local VReg use.
    VReg2SUnitMultiMap::iterator UI = VRegUses.find(Reg);
    for (; UI != VRegUses.end(); ++UI) {
      if (UI->SU == &SU)
        break;
    }
    if (UI == VRegUses.end())
      VRegUses.insert(VReg2SUnit(Reg, 0, &SU));
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Setup the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  VRegUses.clear();
  VRegUses.setUniverse(MRI.getNumVirtRegs());
  for (SUnit &SU : SUnits)
    collectVRegUses(SU);

  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}
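
// Concrete reading of RegionCriticalPSets (illustrative numbers, not from any
// real target): if a pressure set, say "GPR32", has a limit of 12 and the
// RPTracker records a max of 14 live units somewhere in this region, that set
// is cached above, and the excess of 2 later biases the scheduling strategy
// toward pressure reduction in this region.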

void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into
      // the instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

/// schedule - Called back from MachineScheduler::runOnMachineFunction
/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
/// only includes instructions that have DAG nodes, not scheduling boundaries.
///
/// This is a skeletal driver, with all the functionality pushed into helpers,
/// so that it can be easily extended by experimental schedulers. Generally,
/// implementing MachineSchedStrategy should be sufficient to implement a new
/// scheduling algorithm. However, if a scheduler further subclasses
/// ScheduleDAGMILive then it will want to override this virtual method in order
/// to update any specialized state.
void ScheduleDAGMILive::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());
  buildDAGWithRegPressure();

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(
    if (EntrySU.getInstr() != nullptr)
      EntrySU.dumpAll(this);
    for (const SUnit &SU : SUnits) {
      SU.dumpAll(this);
      if (ShouldTrackPressure) {
        dbgs() << "  Pressure Diff : ";
        getPressureDiff(&SU).dump(*TRI);
      }
      dbgs() << '\n';
    }
    if (ExitSU.getInstr() != nullptr)
      ExitSU.dumpAll(this);
  );
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    scheduleMI(SU, IsTopNode);

    if (DFSResult) {
      unsigned SubtreeID = DFSResult->getSubtreeID(SU);
      if (!ScheduledTrees.test(SubtreeID)) {
        ScheduledTrees.set(SubtreeID);
        DFSResult->scheduleTree(SubtreeID);
        SchedImpl->scheduleTree(SubtreeID);
      }
    }

    // Notify the scheduling strategy after updating the DAG.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1181
1182 placeDebugValues();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001183
1184 DEBUG({
Andrew Trickcf7e6972012-11-28 03:42:47 +00001185 unsigned BBNum = begin()->getParent()->getNumber();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001186 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
1187 dumpSchedule();
1188 dbgs() << '\n';
1189 });
Andrew Trick7a8e1002012-09-11 00:39:15 +00001190}
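
// Illustrative sketch of the extension point described above: a strategy that
// always schedules bottom-up in ready order. This is not code from this file;
// it assumes only the MachineSchedStrategy virtual interface declared in
// MachineScheduler.h and deliberately ignores register pressure and hazards.
//
//   struct NaiveBottomUpStrategy : public MachineSchedStrategy {
//     SmallVector<SUnit *, 16> Ready; // bottom-ready nodes
//     void initialize(ScheduleDAGMI *DAG) override {}
//     SUnit *pickNode(bool &IsTopNode) override {
//       IsTopNode = false; // drive the loop in schedule() bottom-up only
//       return Ready.empty() ? nullptr : Ready.pop_back_val();
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override {}
//     void releaseBottomNode(SUnit *SU) override { Ready.push_back(SU); }
//   };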
1191
1192/// Build the DAG and setup three register pressure trackers.
Andrew Trickd7f890e2013-12-28 21:56:47 +00001193void ScheduleDAGMILive::buildDAGWithRegPressure() {
Andrew Trickb6e74712013-09-04 20:59:59 +00001194 if (!ShouldTrackPressure) {
1195 RPTracker.reset();
1196 RegionCriticalPSets.clear();
1197 buildSchedGraph(AA);
1198 return;
1199 }
1200
Andrew Trick4add42f2012-05-10 21:06:10 +00001201 // Initialize the register pressure tracker used by buildSchedGraph.
Andrew Trick9c17eab2013-07-30 19:59:12 +00001202 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
Matthias Braund4f64092016-01-20 00:23:32 +00001203 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
Andrew Trick88639922012-04-24 17:56:43 +00001204
Andrew Trick4add42f2012-05-10 21:06:10 +00001205 // Account for liveness generated by the region boundary.
1206 if (LiveRegionEnd != RegionEnd)
1207 RPTracker.recede();
1208
1209 // Build the DAG, and compute current register pressure.
Matthias Braund4f64092016-01-20 00:23:32 +00001210 buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);
Andrew Trick02a80da2012-03-08 01:41:12 +00001211
Andrew Trick4add42f2012-05-10 21:06:10 +00001212 // Initialize top/bottom trackers after computing region pressure.
1213 initRegPressure();
Andrew Trick7a8e1002012-09-11 00:39:15 +00001214}
Andrew Trick4add42f2012-05-10 21:06:10 +00001215
Andrew Trickd7f890e2013-12-28 21:56:47 +00001216void ScheduleDAGMILive::computeDFSResult() {
Andrew Trick44f750a2013-01-25 04:01:04 +00001217 if (!DFSResult)
1218 DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
1219 DFSResult->clear();
Andrew Trick44f750a2013-01-25 04:01:04 +00001220 ScheduledTrees.clear();
Andrew Tricke2c3f5c2013-01-25 06:33:57 +00001221 DFSResult->resize(SUnits.size());
1222 DFSResult->compute(SUnits);
Andrew Trick44f750a2013-01-25 04:01:04 +00001223 ScheduledTrees.resize(DFSResult->getNumSubtrees());
1224}
1225
Andrew Trick483f4192013-08-29 18:04:49 +00001226/// Compute the max cyclic critical path through the DAG. The scheduling DAG
1227/// only provides the critical path for single block loops. To handle loops that
1228/// span blocks, we could use the vreg path latencies provided by
1229/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
1230/// available for use in the scheduler.
1231///
1232/// The cyclic path estimation identifies a def-use pair that crosses the back
Andrew Trickef80f502013-08-30 02:02:12 +00001233/// edge and considers the depth and height of the nodes. For example, consider
Andrew Trick483f4192013-08-29 18:04:49 +00001234/// the following instruction sequence where each instruction has unit latency
1235/// and defines an epomymous virtual register:
1236///
1237/// a->b(a,c)->c(b)->d(c)->exit
1238///
1239/// The cyclic critical path is two cycles: b->c->b
1240/// The acyclic critical path is four cycles: a->b->c->d->exit
1241/// LiveOutHeight = height(c) = len(c->d->exit) = 2
1242/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
1243/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
1244/// LiveInDepth = depth(b) = len(a->b) = 1
1245///
1246/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
1247/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
1248/// CyclicCriticalPath = min(2, 2) = 2
Andrew Trickd7f890e2013-12-28 21:56:47 +00001249///
1250/// This could be relevant to PostRA scheduling, but is currently implemented
1251/// assuming LiveIntervals.
1252unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
Andrew Trick483f4192013-08-29 18:04:49 +00001253 // This only applies to single block loops.
1254 if (!BB->isSuccessor(BB))
1255 return 0;
1256
1257 unsigned MaxCyclicLatency = 0;
1258 // Visit each live out vreg def to find def/use pairs that cross iterations.
Matthias Braun5d458612016-01-20 00:23:26 +00001259 for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
1260 unsigned Reg = P.RegUnit;
Andrew Trick483f4192013-08-29 18:04:49 +00001261 if (!TRI->isVirtualRegister(Reg))
1262 continue;
1263 const LiveInterval &LI = LIS->getInterval(Reg);
1264 const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1265 if (!DefVNI)
1266 continue;
1267
1268 MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
1269 const SUnit *DefSU = getSUnit(DefMI);
1270 if (!DefSU)
1271 continue;
1272
1273 unsigned LiveOutHeight = DefSU->getHeight();
1274 unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1275 // Visit all local users of the vreg def.
Matthias Braunb0c437b2015-10-29 03:57:17 +00001276 for (const VReg2SUnit &V2SU
1277 : make_range(VRegUses.find(Reg), VRegUses.end())) {
1278 SUnit *SU = V2SU.SU;
1279 if (SU == &ExitSU)
Andrew Trick483f4192013-08-29 18:04:49 +00001280 continue;
1281
1282 // Only consider uses of the phi.
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001283 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
Andrew Trick483f4192013-08-29 18:04:49 +00001284 if (!LRQ.valueIn()->isPHIDef())
1285 continue;
1286
1287 // Assume that a path spanning two iterations is a cycle, which could
1288 // overestimate in strange cases. This allows cyclic latency to be
1289 // estimated as the minimum slack of the vreg's depth or height.
1290 unsigned CyclicLatency = 0;
Matthias Braunb0c437b2015-10-29 03:57:17 +00001291 if (LiveOutDepth > SU->getDepth())
1292 CyclicLatency = LiveOutDepth - SU->getDepth();
Andrew Trick483f4192013-08-29 18:04:49 +00001293
Matthias Braunb0c437b2015-10-29 03:57:17 +00001294 unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
Andrew Trick483f4192013-08-29 18:04:49 +00001295 if (LiveInHeight > LiveOutHeight) {
1296 if (LiveInHeight - LiveOutHeight < CyclicLatency)
1297 CyclicLatency = LiveInHeight - LiveOutHeight;
Matthias Braunb550b762016-04-21 01:54:13 +00001298 } else
Andrew Trick483f4192013-08-29 18:04:49 +00001299 CyclicLatency = 0;
1300
1301 DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
Matthias Braunb0c437b2015-10-29 03:57:17 +00001302 << SU->NodeNum << ") = " << CyclicLatency << "c\n");
Andrew Trick483f4192013-08-29 18:04:49 +00001303 if (CyclicLatency > MaxCyclicLatency)
1304 MaxCyclicLatency = CyclicLatency;
1305 }
1306 }
1307 DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1308 return MaxCyclicLatency;
1309}
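
// E.g., for the loop documented above the function, the returned 2c bound
// means consecutive iterations can overlap no tighter than one iteration
// every two cycles, however the acyclic code is arranged. The generic
// scheduling heuristics later in this file compare this bound against the
// acyclic critical path to decide whether the region is limited by the
// recurrence or by straight-line latency.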
1310
Krzysztof Parzyszek7ea9a522016-04-28 19:17:44 +00001311/// Release ExitSU predecessors and set up scheduler queues. Re-position
1312/// the Top RP tracker in case the region beginning has changed.
1313void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
1314 ArrayRef<SUnit*> BotRoots) {
1315 ScheduleDAGMI::initQueues(TopRoots, BotRoots);
1316 if (ShouldTrackPressure) {
1317 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1318 TopRPTracker.setPos(CurrentTop);
1319 }
1320}
1321
Andrew Trick7a8e1002012-09-11 00:39:15 +00001322/// Move an instruction and update register pressure.
Andrew Trickd7f890e2013-12-28 21:56:47 +00001323void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
Andrew Trick7a8e1002012-09-11 00:39:15 +00001324 // Move the instruction to its new location in the instruction stream.
1325 MachineInstr *MI = SU->getInstr();
Andrew Trick02a80da2012-03-08 01:41:12 +00001326
Andrew Trick7a8e1002012-09-11 00:39:15 +00001327 if (IsTopNode) {
1328 assert(SU->isTopReady() && "node still has unscheduled dependencies");
1329 if (&*CurrentTop == MI)
1330 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
Andrew Trick8823dec2012-03-14 04:00:41 +00001331 else {
Andrew Trick7a8e1002012-09-11 00:39:15 +00001332 moveInstruction(MI, CurrentTop);
1333 TopRPTracker.setPos(MI);
Andrew Trick8823dec2012-03-14 04:00:41 +00001334 }
Andrew Trickc3ea0052012-04-24 18:04:37 +00001335
Andrew Trickb6e74712013-09-04 20:59:59 +00001336 if (ShouldTrackPressure) {
1337 // Update top scheduled pressure.
Matthias Braund4f64092016-01-20 00:23:32 +00001338 RegisterOperands RegOpers;
1339 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1340 if (ShouldTrackLaneMasks) {
1341 // Adjust liveness and add missing dead+read-undef flags.
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001342 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
Matthias Braund4f64092016-01-20 00:23:32 +00001343 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1344 } else {
1345 // Adjust for missing dead-def flags.
1346 RegOpers.detectDeadDefs(*MI, *LIS);
1347 }
1348
1349 TopRPTracker.advance(RegOpers);
Andrew Trickb6e74712013-09-04 20:59:59 +00001350 assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
Matthias Braun9198c672015-11-06 20:59:02 +00001351 DEBUG(
1352 dbgs() << "Top Pressure:\n";
1353 dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
1354 );
1355
Andrew Trickb248b4a2013-09-06 17:32:47 +00001356 updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
Andrew Trickb6e74712013-09-04 20:59:59 +00001357 }
Matthias Braunb550b762016-04-21 01:54:13 +00001358 } else {
Andrew Trick7a8e1002012-09-11 00:39:15 +00001359 assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1360 MachineBasicBlock::iterator priorII =
1361 priorNonDebug(CurrentBottom, CurrentTop);
1362 if (&*priorII == MI)
1363 CurrentBottom = priorII;
1364 else {
1365 if (&*CurrentTop == MI) {
1366 CurrentTop = nextIfDebug(++CurrentTop, priorII);
1367 TopRPTracker.setPos(CurrentTop);
1368 }
1369 moveInstruction(MI, CurrentBottom);
1370 CurrentBottom = MI;
1371 }
Andrew Trickb6e74712013-09-04 20:59:59 +00001372 if (ShouldTrackPressure) {
Matthias Braund4f64092016-01-20 00:23:32 +00001373 RegisterOperands RegOpers;
1374 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1375 if (ShouldTrackLaneMasks) {
1376 // Adjust liveness and add missing dead+read-undef flags.
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001377 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
Matthias Braund4f64092016-01-20 00:23:32 +00001378 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1379 } else {
1380 // Adjust for missing dead-def flags.
1381 RegOpers.detectDeadDefs(*MI, *LIS);
1382 }
1383
1384 BotRPTracker.recedeSkipDebugValues();
Matthias Braun5d458612016-01-20 00:23:26 +00001385 SmallVector<RegisterMaskPair, 8> LiveUses;
Matthias Braund4f64092016-01-20 00:23:32 +00001386 BotRPTracker.recede(RegOpers, &LiveUses);
Andrew Trickb6e74712013-09-04 20:59:59 +00001387 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
Matthias Braun9198c672015-11-06 20:59:02 +00001388 DEBUG(
1389 dbgs() << "Bottom Pressure:\n";
1390 dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
1391 );
1392
Andrew Trickb248b4a2013-09-06 17:32:47 +00001393 updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
Andrew Trickb6e74712013-09-04 20:59:59 +00001394 updatePressureDiffs(LiveUses);
Andrew Trickb6e74712013-09-04 20:59:59 +00001395 }
Andrew Trick7a8e1002012-09-11 00:39:15 +00001396 }
1397}
1398
Andrew Trick263280242012-11-12 19:52:20 +00001399//===----------------------------------------------------------------------===//
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001400// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
Andrew Trick263280242012-11-12 19:52:20 +00001401//===----------------------------------------------------------------------===//
1402
Andrew Tricka7714a02012-11-12 19:40:10 +00001403namespace {
1404/// \brief Post-process the DAG to create cluster edges between neighboring
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001405/// loads or between neighboring stores.
1406class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1407 struct MemOpInfo {
Andrew Tricka7714a02012-11-12 19:40:10 +00001408 SUnit *SU;
1409 unsigned BaseReg;
Chad Rosierc27a18f2016-03-09 16:00:35 +00001410 int64_t Offset;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001411 MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
1412 : SU(su), BaseReg(reg), Offset(ofs) {}
Benjamin Kramerb0f74b22014-03-07 21:35:39 +00001413
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001414 bool operator<(const MemOpInfo &RHS) const {
Mandeep Singh Grange82678a2016-10-18 00:11:19 +00001415 return std::tie(BaseReg, Offset, SU->NodeNum) <
1416 std::tie(RHS.BaseReg, RHS.Offset, RHS.SU->NodeNum);
Benjamin Kramerb0f74b22014-03-07 21:35:39 +00001417 }
Andrew Tricka7714a02012-11-12 19:40:10 +00001418 };
Andrew Tricka7714a02012-11-12 19:40:10 +00001419
1420 const TargetInstrInfo *TII;
1421 const TargetRegisterInfo *TRI;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001422 bool IsLoad;
1423
Andrew Tricka7714a02012-11-12 19:40:10 +00001424public:
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001425 BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1426 const TargetRegisterInfo *tri, bool IsLoad)
1427 : TII(tii), TRI(tri), IsLoad(IsLoad) {}
Andrew Tricka7714a02012-11-12 19:40:10 +00001428
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001429 void apply(ScheduleDAGInstrs *DAGInstrs) override;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001430
Andrew Tricka7714a02012-11-12 19:40:10 +00001431protected:
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001432 void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
1433};
1434
1435class StoreClusterMutation : public BaseMemOpClusterMutation {
1436public:
1437 StoreClusterMutation(const TargetInstrInfo *tii,
1438 const TargetRegisterInfo *tri)
1439 : BaseMemOpClusterMutation(tii, tri, false) {}
1440};
1441
1442class LoadClusterMutation : public BaseMemOpClusterMutation {
1443public:
1444 LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1445 : BaseMemOpClusterMutation(tii, tri, true) {}
Andrew Tricka7714a02012-11-12 19:40:10 +00001446};
Alexander Kornienkof00654e2015-06-23 09:49:53 +00001447} // anonymous
Andrew Tricka7714a02012-11-12 19:40:10 +00001448
Tom Stellard68726a52016-08-19 19:59:18 +00001449namespace llvm {
1450
1451std::unique_ptr<ScheduleDAGMutation>
1452createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1453 const TargetRegisterInfo *TRI) {
1454 return make_unique<LoadClusterMutation>(TII, TRI);
1455}
1456
1457std::unique_ptr<ScheduleDAGMutation>
1458createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1459 const TargetRegisterInfo *TRI) {
1460 return make_unique<StoreClusterMutation>(TII, TRI);
1461}
1462
1463} // namespace llvm
1464
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001465void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1466 ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
1467 SmallVector<MemOpInfo, 32> MemOpRecords;
1468 for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
1469 SUnit *SU = MemOps[Idx];
Andrew Tricka7714a02012-11-12 19:40:10 +00001470 unsigned BaseReg;
Chad Rosierc27a18f2016-03-09 16:00:35 +00001471 int64_t Offset;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001472 if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001473 MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
Andrew Tricka7714a02012-11-12 19:40:10 +00001474 }
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001475 if (MemOpRecords.size() < 2)
Andrew Tricka7714a02012-11-12 19:40:10 +00001476 return;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001477
1478 std::sort(MemOpRecords.begin(), MemOpRecords.end());
Andrew Tricka7714a02012-11-12 19:40:10 +00001479 unsigned ClusterLength = 1;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001480 for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1481 if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
Andrew Tricka7714a02012-11-12 19:40:10 +00001482 ClusterLength = 1;
1483 continue;
1484 }
1485
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001486 SUnit *SUa = MemOpRecords[Idx].SU;
1487 SUnit *SUb = MemOpRecords[Idx+1].SU;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001488 if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
1489 ClusterLength) &&
1490 DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001491 DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
Andrew Tricka7714a02012-11-12 19:40:10 +00001492 << SUb->NodeNum << ")\n");
1493 // Copy successor edges from SUa to SUb. Interleaving computation
1494 // dependent on SUa can prevent load combining due to register reuse.
1495 // Predecessor edges do not need to be copied from SUb to SUa since nearby
1496 // loads should have effectively the same inputs.
1497 for (SUnit::const_succ_iterator
1498 SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1499 if (SI->getSUnit() == SUb)
1500 continue;
1501 DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1502 DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1503 }
1504 ++ClusterLength;
Matthias Braunb550b762016-04-21 01:54:13 +00001505 } else
Andrew Tricka7714a02012-11-12 19:40:10 +00001506 ClusterLength = 1;
1507 }
1508}
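
// Worked example (hypothetical SUs and operands, not from this file): if a
// chain contains
//   SU(5): load %r0, [%x0, #8]
//   SU(2): load %r1, [%x0, #12]
//   SU(7): load %r2, [%x1, #0]
// then sorting MemOpRecords by (BaseReg, Offset, NodeNum) yields
//   (%x0, #8, SU5), (%x0, #12, SU2), (%x1, #0, SU7)
// so shouldClusterMemOps is queried only for the adjacent (SU5, SU2) pair;
// SU(7) begins a new run because its base register differs, which resets
// ClusterLength to 1.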
1509
1510/// \brief Callback from DAG postProcessing to create cluster edges for loads.
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001511void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1512
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001513 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1514
Andrew Tricka7714a02012-11-12 19:40:10 +00001515 // Map DAG NodeNum to store chain ID.
1516 DenseMap<unsigned, unsigned> StoreChainIDs;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001517 // Map each store chain to a set of dependent MemOps.
Andrew Tricka7714a02012-11-12 19:40:10 +00001518 SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1519 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1520 SUnit *SU = &DAG->SUnits[Idx];
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001521 if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1522 (!IsLoad && !SU->getInstr()->mayStore()))
Andrew Tricka7714a02012-11-12 19:40:10 +00001523 continue;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001524
Andrew Tricka7714a02012-11-12 19:40:10 +00001525 unsigned ChainPredID = DAG->SUnits.size();
1526 for (SUnit::const_pred_iterator
1527 PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1528 if (PI->isCtrl()) {
1529 ChainPredID = PI->getSUnit()->NodeNum;
1530 break;
1531 }
1532 }
1533 // Check if this chain-like pred has been seen
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001534 // before. ChainPredID==MaxNodeID at the top of the schedule.
Andrew Tricka7714a02012-11-12 19:40:10 +00001535 unsigned NumChains = StoreChainDependents.size();
1536 std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1537 StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1538 if (Result.second)
1539 StoreChainDependents.resize(NumChains + 1);
1540 StoreChainDependents[Result.first->second].push_back(SU);
1541 }
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001542
Andrew Tricka7714a02012-11-12 19:40:10 +00001543 // Iterate over the store chains.
1544 for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001545 clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
Andrew Tricka7714a02012-11-12 19:40:10 +00001546}
1547
Andrew Trick02a80da2012-03-08 01:41:12 +00001548//===----------------------------------------------------------------------===//
Andrew Trick263280242012-11-12 19:52:20 +00001549// MacroFusion - DAG post-processing to encourage fusion of macro ops.
1550//===----------------------------------------------------------------------===//
1551
1552namespace {
1553/// \brief Post-process the DAG to create cluster edges between instructions
1554/// that may be fused by the processor into a single operation.
1555class MacroFusion : public ScheduleDAGMutation {
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001556 const TargetInstrInfo &TII;
Andrew Trick263280242012-11-12 19:52:20 +00001557public:
Matthias Braun325cd2c2016-11-11 01:34:21 +00001558 MacroFusion(const TargetInstrInfo &TII)
1559 : TII(TII) {}
Andrew Trick263280242012-11-12 19:52:20 +00001560
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001561 void apply(ScheduleDAGInstrs *DAGInstrs) override;
Andrew Trick263280242012-11-12 19:52:20 +00001562};
Alexander Kornienkof00654e2015-06-23 09:49:53 +00001563} // anonymous
Andrew Trick263280242012-11-12 19:52:20 +00001564
Tom Stellard68726a52016-08-19 19:59:18 +00001565namespace llvm {
1566
1567std::unique_ptr<ScheduleDAGMutation>
Matthias Braun325cd2c2016-11-11 01:34:21 +00001568createMacroFusionDAGMutation(const TargetInstrInfo *TII) {
1569 return make_unique<MacroFusion>(*TII);
Tom Stellard68726a52016-08-19 19:59:18 +00001570}
1571
1572} // namespace llvm
1573
Andrew Trick263280242012-11-12 19:52:20 +00001574/// \brief Callback from DAG postProcessing to create cluster edges to encourage
1575/// fused operations.
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001576void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1577 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1578
Andrew Trick263280242012-11-12 19:52:20 +00001579 // For now, assume targets can only fuse with the branch.
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001580 SUnit &ExitSU = DAG->ExitSU;
1581 MachineInstr *Branch = ExitSU.getInstr();
Andrew Trick263280242012-11-12 19:52:20 +00001582 if (!Branch)
1583 return;
1584
Matthias Braun325cd2c2016-11-11 01:34:21 +00001585 for (SDep &PredDep : ExitSU.Preds) {
1586 if (PredDep.isWeak())
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001587 continue;
Matthias Braun325cd2c2016-11-11 01:34:21 +00001588 SUnit &SU = *PredDep.getSUnit();
1589 MachineInstr &Pred = *SU.getInstr();
1590 if (!TII.shouldScheduleAdjacent(Pred, *Branch))
Andrew Trick263280242012-11-12 19:52:20 +00001591 continue;
1592
1593 // Create a single weak edge from SU to ExitSU. The only effect is to cause
1594 // bottom-up scheduling to heavily prioritize the clustered SU. There is no
1595 // need to copy predecessor edges from ExitSU to SU, since top-down
1596 // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1597 // of SU, we could create an artificial edge from the deepest root, but it
1598 // hasn't been needed yet.
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001599 bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
Andrew Trick263280242012-11-12 19:52:20 +00001600 (void)Success;
1601 assert(Success && "No DAG nodes should be reachable from ExitSU");
1602
Matthias Braun325cd2c2016-11-11 01:34:21 +00001603 // Adjust latency of data deps between the nodes.
1604 for (SDep &PredDep : ExitSU.Preds) {
1605 if (PredDep.getSUnit() == &SU)
1606 PredDep.setLatency(0);
1607 }
1608 for (SDep &SuccDep : SU.Succs) {
1609 if (SuccDep.getSUnit() == &ExitSU)
1610 SuccDep.setLatency(0);
1611 }
1612
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001613 DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
Andrew Trick263280242012-11-12 19:52:20 +00001614 break;
1615 }
1616}
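
// For example, on a target whose shouldScheduleAdjacent hook reports that a
// compare fuses with the conditional branch consuming its flags (a common
// macro-op fusion pair), the compare's SU gets a weak Cluster edge to ExitSU
// and the dependence latencies between the pair are zeroed, so bottom-up
// scheduling keeps the two instructions adjacent without otherwise
// constraining the region.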
1617
1618//===----------------------------------------------------------------------===//
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001619// CopyConstrain - DAG post-processing to encourage copy elimination.
1620//===----------------------------------------------------------------------===//
1621
1622namespace {
1623/// \brief Post-process the DAG to create weak edges from all uses of a copy to
1624/// the one use that defines the copy's source vreg, most likely an induction
1625/// variable increment.
1626class CopyConstrain : public ScheduleDAGMutation {
1627 // Transient state.
1628 SlotIndex RegionBeginIdx;
Andrew Trick2e875172013-04-24 23:19:56 +00001629 // RegionEndIdx is the slot index of the last non-debug instruction in the
1630 // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001631 SlotIndex RegionEndIdx;
1632public:
1633 CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1634
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001635 void apply(ScheduleDAGInstrs *DAGInstrs) override;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001636
1637protected:
Andrew Trickd7f890e2013-12-28 21:56:47 +00001638 void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001639};
Alexander Kornienkof00654e2015-06-23 09:49:53 +00001640} // anonymous
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001641
Tom Stellard68726a52016-08-19 19:59:18 +00001642namespace llvm {
1643
1644std::unique_ptr<ScheduleDAGMutation>
1645createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1646 const TargetRegisterInfo *TRI) {
1647 return make_unique<CopyConstrain>(TII, TRI);
1648}
1649
1650} // namespace llvm
1651
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001652/// constrainLocalCopy handles two possibilities:
1653/// 1) Local src:
1654/// I0: = dst
1655/// I1: src = ...
1656/// I2: = dst
1657/// I3: dst = src (copy)
1658/// (create pred->succ edges I0->I1, I2->I1)
1659///
1660/// 2) Local copy:
1661/// I0: dst = src (copy)
1662/// I1: = dst
1663/// I2: src = ...
1664/// I3: = dst
1665/// (create pred->succ edges I1->I2, I3->I2)
1666///
1667/// Although the MachineScheduler is currently constrained to single blocks,
1668/// this algorithm should handle extended blocks. An EBB is a set of
1669/// contiguously numbered blocks such that the previous block in the EBB is
1670/// always the single predecessor.
Andrew Trickd7f890e2013-12-28 21:56:47 +00001671void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001672 LiveIntervals *LIS = DAG->getLIS();
1673 MachineInstr *Copy = CopySU->getInstr();
1674
1675 // Check for pure vreg copies.
Matthias Braun7511abd2016-04-04 21:23:46 +00001676 const MachineOperand &SrcOp = Copy->getOperand(1);
1677 unsigned SrcReg = SrcOp.getReg();
1678 if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001679 return;
1680
Matthias Braun7511abd2016-04-04 21:23:46 +00001681 const MachineOperand &DstOp = Copy->getOperand(0);
1682 unsigned DstReg = DstOp.getReg();
1683 if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001684 return;
1685
1686 // Check if either the dest or source is local. If it's live across a back
1687 // edge, it's not local. Note that if both vregs are live across the back
1688 // edge, we cannot successfully contrain the copy without cyclic scheduling.
Michael Kuperstein54c61ed2015-01-19 07:30:47 +00001689 // If both the copy's source and dest are local live intervals, then we
1690 // should treat the dest as the global for the purpose of adding
1691 // constraints. This adds edges from source's other uses to the copy.
1692 unsigned LocalReg = SrcReg;
1693 unsigned GlobalReg = DstReg;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001694 LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1695 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
Michael Kuperstein54c61ed2015-01-19 07:30:47 +00001696 LocalReg = DstReg;
1697 GlobalReg = SrcReg;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001698 LocalLI = &LIS->getInterval(LocalReg);
1699 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1700 return;
1701 }
1702 LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1703
1704 // Find the global segment after the start of the local LI.
1705 LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1706 // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1707 // local live range. We could create edges from other global uses to the local
1708 // start, but the coalescer should have already eliminated these cases, so
1709 // don't bother dealing with it.
1710 if (GlobalSegment == GlobalLI->end())
1711 return;
1712
1713 // If GlobalSegment is killed at the LocalLI->start, the call to find()
1714 // returned the next global segment. But if GlobalSegment overlaps with
1715 // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1716 // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1717 if (GlobalSegment->contains(LocalLI->beginIndex()))
1718 ++GlobalSegment;
1719
1720 if (GlobalSegment == GlobalLI->end())
1721 return;
1722
1723 // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1724 if (GlobalSegment != GlobalLI->begin()) {
1725 // Two-address defs have no hole.
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00001726 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001727 GlobalSegment->start)) {
1728 return;
1729 }
Andrew Trickd9761772013-07-30 19:59:08 +00001730 // If the prior global segment may be defined by the same two-address
1731 // instruction that also defines LocalLI, then can't make a hole here.
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00001732 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
Andrew Trickd9761772013-07-30 19:59:08 +00001733 LocalLI->beginIndex())) {
1734 return;
1735 }
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001736 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1737 // it would be a disconnected component in the live range.
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00001738 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001739 "Disconnected LRG within the scheduling region.");
1740 }
1741 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1742 if (!GlobalDef)
1743 return;
1744
1745 SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1746 if (!GlobalSU)
1747 return;
1748
1749 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1750 // constraining the uses of the last local def to precede GlobalDef.
1751 SmallVector<SUnit*,8> LocalUses;
1752 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1753 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1754 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1755 for (SUnit::const_succ_iterator
1756 I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1757 I != E; ++I) {
1758 if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1759 continue;
1760 if (I->getSUnit() == GlobalSU)
1761 continue;
1762 if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1763 return;
1764 LocalUses.push_back(I->getSUnit());
1765 }
1766 // Open the top of the GlobalLI hole by constraining any earlier global uses
1767 // to precede the start of LocalLI.
1768 SmallVector<SUnit*,8> GlobalUses;
1769 MachineInstr *FirstLocalDef =
1770 LIS->getInstructionFromIndex(LocalLI->beginIndex());
1771 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1772 for (SUnit::const_pred_iterator
1773 I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1774 if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1775 continue;
1776 if (I->getSUnit() == FirstLocalSU)
1777 continue;
1778 if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1779 return;
1780 GlobalUses.push_back(I->getSUnit());
1781 }
1782 DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1783 // Add the weak edges.
1784 for (SmallVectorImpl<SUnit*>::const_iterator
1785 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1786 DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
1787 << GlobalSU->NodeNum << ")\n");
1788 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1789 }
1790 for (SmallVectorImpl<SUnit*>::const_iterator
1791 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1792 DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
1793 << FirstLocalSU->NodeNum << ")\n");
1794 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1795 }
1796}
1797
1798/// \brief Callback from DAG postProcessing to create weak edges to encourage
1799/// copy elimination.
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001800void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1801 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
Andrew Trickd7f890e2013-12-28 21:56:47 +00001802 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1803
Andrew Trick2e875172013-04-24 23:19:56 +00001804 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1805 if (FirstPos == DAG->end())
1806 return;
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001807 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001808 RegionEndIdx = DAG->getLIS()->getInstructionIndex(
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001809 *priorNonDebug(DAG->end(), DAG->begin()));
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001810
1811 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1812 SUnit *SU = &DAG->SUnits[Idx];
1813 if (!SU->getInstr()->isCopy())
1814 continue;
1815
Andrew Trickd7f890e2013-12-28 21:56:47 +00001816 constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001817 }
1818}
1819
1820//===----------------------------------------------------------------------===//
Andrew Trickfc127d12013-12-07 05:59:44 +00001821// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1822// and possibly other custom schedulers.
Andrew Trickd14d7c22013-12-28 21:56:57 +00001823//===----------------------------------------------------------------------===//
Andrew Tricke1c034f2012-01-17 06:55:03 +00001824
Andrew Trick5a22df42013-12-05 17:56:02 +00001825static const unsigned InvalidCycle = ~0U;
1826
Andrew Trickfc127d12013-12-07 05:59:44 +00001827SchedBoundary::~SchedBoundary() { delete HazardRec; }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001828
Andrew Trickfc127d12013-12-07 05:59:44 +00001829void SchedBoundary::reset() {
1830 // A new HazardRec is created for each DAG and owned by SchedBoundary.
1831 // Destroying and reconstructing it is very expensive though. So keep
1832 // invalid, placeholder HazardRecs.
1833 if (HazardRec && HazardRec->isEnabled()) {
1834 delete HazardRec;
Craig Topperc0196b12014-04-14 00:51:57 +00001835 HazardRec = nullptr;
Andrew Trickfc127d12013-12-07 05:59:44 +00001836 }
1837 Available.clear();
1838 Pending.clear();
1839 CheckPending = false;
Andrew Trickfc127d12013-12-07 05:59:44 +00001840 CurrCycle = 0;
1841 CurrMOps = 0;
1842 MinReadyCycle = UINT_MAX;
1843 ExpectedLatency = 0;
1844 DependentLatency = 0;
1845 RetiredMOps = 0;
1846 MaxExecutedResCount = 0;
1847 ZoneCritResIdx = 0;
1848 IsResourceLimited = false;
1849 ReservedCycles.clear();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001850#ifndef NDEBUG
Andrew Trickd14d7c22013-12-28 21:56:57 +00001851 // Track the maximum number of stall cycles that could arise either from the
1852 // latency of a DAG edge or the number of cycles that a processor resource is
1853 // reserved (SchedBoundary::ReservedCycles).
Andrew Trick7f1ebbe2014-06-07 01:48:43 +00001854 MaxObservedStall = 0;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001855#endif
Andrew Trickfc127d12013-12-07 05:59:44 +00001856 // Reserve a zero-count for invalid CritResIdx.
1857 ExecutedResCounts.resize(1);
1858 assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1859}
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001860
Andrew Trickfc127d12013-12-07 05:59:44 +00001861void SchedRemainder::
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001862init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1863 reset();
1864 if (!SchedModel->hasInstrSchedModel())
1865 return;
1866 RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1867 for (std::vector<SUnit>::iterator
1868 I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1869 const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001870 RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1871 * SchedModel->getMicroOpFactor();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001872 for (TargetSchedModel::ProcResIter
1873 PI = SchedModel->getWriteProcResBegin(SC),
1874 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1875 unsigned PIdx = PI->ProcResourceIdx;
1876 unsigned Factor = SchedModel->getResourceFactor(PIdx);
1877 RemainingCounts[PIdx] += (Factor * PI->Cycles);
1878 }
1879 }
1880}
1881
Andrew Trickfc127d12013-12-07 05:59:44 +00001882void SchedBoundary::
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001883init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1884 reset();
1885 DAG = dag;
1886 SchedModel = smodel;
1887 Rem = rem;
Andrew Trick5a22df42013-12-05 17:56:02 +00001888 if (SchedModel->hasInstrSchedModel()) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001889 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
Andrew Trick5a22df42013-12-05 17:56:02 +00001890 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1891 }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001892}
1893
Andrew Trick880e5732013-12-05 17:55:58 +00001894/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1895/// these "soft stalls" differently than the hard stall cycles based on CPU
1896/// resources and computed by checkHazard(). A fully in-order model
1897/// (MicroOpBufferSize==0) will not make use of this since instructions are not
1898/// available for scheduling until they are ready. However, a weaker in-order
1899/// model may use this for heuristics. For example, if a processor has in-order
1900/// behavior when reading certain resources, this may come into play.
Andrew Trickfc127d12013-12-07 05:59:44 +00001901unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
Andrew Trick880e5732013-12-05 17:55:58 +00001902 if (!SU->isUnbuffered)
1903 return 0;
1904
1905 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1906 if (ReadyCycle > CurrCycle)
1907 return ReadyCycle - CurrCycle;
1908 return 0;
1909}
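
// E.g., for an unbuffered (in-order) SU whose BotReadyCycle is 7 while the
// zone's CurrCycle is 5, this returns 2: issuing it now would incur a
// two-cycle soft stall that the heuristics may weigh against other
// candidates, even though the node is nominally available.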
1910
Andrew Trick5a22df42013-12-05 17:56:02 +00001911/// Compute the next cycle at which the given processor resource can be
1912/// scheduled.
Andrew Trickfc127d12013-12-07 05:59:44 +00001913unsigned SchedBoundary::
Andrew Trick5a22df42013-12-05 17:56:02 +00001914getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1915 unsigned NextUnreserved = ReservedCycles[PIdx];
1916 // If this resource has never been used, always return cycle zero.
1917 if (NextUnreserved == InvalidCycle)
1918 return 0;
1919 // For bottom-up scheduling add the cycles needed for the current operation.
1920 if (!isTop())
1921 NextUnreserved += Cycles;
1922 return NextUnreserved;
1923}
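
// E.g., if an unpipelined unit was recorded busy through cycle 9
// (ReservedCycles[PIdx] == 9), a top-down candidate cannot start on it
// before cycle 9, while a bottom-up candidate that would occupy it for
// Cycles == 4 gets 9 + 4 = 13, because when scheduling in reverse the
// reservation must end that many cycles above the instruction's own slot.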
1924
Andrew Trick8c9e6722012-06-29 03:23:24 +00001925/// Does this SU have a hazard within the current instruction group?
1926///
1927/// The scheduler supports two modes of hazard recognition. The first is the
1928/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1929/// supports highly complicated in-order reservation tables
1930/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1931///
1932/// The second is a streamlined mechanism that checks for hazards based on
1933/// simple counters that the scheduler itself maintains. It explicitly checks
1934/// for instruction dispatch limitations, including the number of micro-ops that
1935/// can dispatch per cycle.
1936///
1937/// TODO: Also check whether the SU must start a new group.
Andrew Trickfc127d12013-12-07 05:59:44 +00001938bool SchedBoundary::checkHazard(SUnit *SU) {
Andrew Trickd14d7c22013-12-28 21:56:57 +00001939 if (HazardRec->isEnabled()
1940 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1941 return true;
1942 }
Andrew Trickdd79f0f2012-10-10 05:43:09 +00001943 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
Andrew Tricke2ff5752013-06-15 04:49:49 +00001944 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001945 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
1946 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
Andrew Trick8c9e6722012-06-29 03:23:24 +00001947 return true;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001948 }
Andrew Trick5a22df42013-12-05 17:56:02 +00001949 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1950 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1951 for (TargetSchedModel::ProcResIter
1952 PI = SchedModel->getWriteProcResBegin(SC),
1953 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
Andrew Trick56327222014-06-27 04:57:05 +00001954 unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1955 if (NRCycle > CurrCycle) {
Andrew Trick040c0da2014-06-27 05:09:36 +00001956#ifndef NDEBUG
Chad Rosieraba845e2014-07-02 16:46:08 +00001957 MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
Andrew Trick040c0da2014-06-27 05:09:36 +00001958#endif
Andrew Trick56327222014-06-27 04:57:05 +00001959 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
1960 << SchedModel->getResourceName(PI->ProcResourceIdx)
1961 << "=" << NRCycle << "c\n");
Andrew Trick5a22df42013-12-05 17:56:02 +00001962 return true;
Andrew Trick56327222014-06-27 04:57:05 +00001963 }
Andrew Trick5a22df42013-12-05 17:56:02 +00001964 }
1965 }
Andrew Trick8c9e6722012-06-29 03:23:24 +00001966 return false;
1967}
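
// Example of the counter-based check (hypothetical numbers): with
// IssueWidth == 2 and CurrMOps == 1, an SU that decodes to uops == 2 gives
// CurrMOps + uops == 3 > 2, so the node is a hazard for the current issue
// group and waits in Pending until bumpCycle() opens the next group.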
1968
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001969// Find the unscheduled node in ReadySUs with the highest latency.
Andrew Trickfc127d12013-12-07 05:59:44 +00001970unsigned SchedBoundary::
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001971findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
Craig Topperc0196b12014-04-14 00:51:57 +00001972 SUnit *LateSU = nullptr;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001973 unsigned RemLatency = 0;
1974 for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001975 I != E; ++I) {
1976 unsigned L = getUnscheduledLatency(*I);
Andrew Trickf5b8ef22013-06-15 04:49:44 +00001977 if (L > RemLatency) {
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001978 RemLatency = L;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001979 LateSU = *I;
Andrew Trickf5b8ef22013-06-15 04:49:44 +00001980 }
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001981 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001982 if (LateSU) {
1983 DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1984 << LateSU->NodeNum << ") " << RemLatency << "c\n");
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001985 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001986 return RemLatency;
1987}
Andrew Trickf5b8ef22013-06-15 04:49:44 +00001988
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001989// Count resources in this zone and the remaining unscheduled
1990// instruction. Return the max count, scaled. Set OtherCritIdx to the critical
1991// resource index, or zero if the zone is issue limited.
Andrew Trickfc127d12013-12-07 05:59:44 +00001992unsigned SchedBoundary::
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001993getOtherResourceCount(unsigned &OtherCritIdx) {
Alexey Samsonov64c391d2013-07-19 08:55:18 +00001994 OtherCritIdx = 0;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001995 if (!SchedModel->hasInstrSchedModel())
1996 return 0;
1997
1998 unsigned OtherCritCount = Rem->RemIssueCount
1999 + (RetiredMOps * SchedModel->getMicroOpFactor());
2000 DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
2001 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002002 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
2003 PIdx != PEnd; ++PIdx) {
2004 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
2005 if (OtherCount > OtherCritCount) {
2006 OtherCritCount = OtherCount;
2007 OtherCritIdx = PIdx;
2008 }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002009 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002010 if (OtherCritIdx) {
2011 DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
2012 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
Andrew Trickfc127d12013-12-07 05:59:44 +00002013 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002014 }
2015 return OtherCritCount;
2016}
2017
Andrew Trickfc127d12013-12-07 05:59:44 +00002018void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
Andrew Trick7f1ebbe2014-06-07 01:48:43 +00002019 assert(SU->getInstr() && "Scheduled SUnit must have instr");
2020
2021#ifndef NDEBUG
Andrew Trick491e34a2014-06-12 22:36:28 +00002022 // ReadyCycle has been bumped up to CurrCycle when this node was
2023 // scheduled, but CurrCycle may have been eagerly advanced immediately after
2024 // scheduling, so may now be greater than ReadyCycle.
2025 if (ReadyCycle > CurrCycle)
2026 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
Andrew Trick7f1ebbe2014-06-07 01:48:43 +00002027#endif
2028
Andrew Trick61f1a272012-05-24 22:11:09 +00002029 if (ReadyCycle < MinReadyCycle)
2030 MinReadyCycle = ReadyCycle;
2031
2032 // Check for interlocks first. For the purpose of other heuristics, an
2033 // instruction that cannot issue appears as if it's not in the ReadyQueue.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002034 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
Matthias Braun6493bc22016-04-22 19:09:17 +00002035 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
2036 Available.size() >= ReadyListLimit)
Andrew Trick61f1a272012-05-24 22:11:09 +00002037 Pending.push(SU);
2038 else
2039 Available.push(SU);
2040}
2041
2042/// Move the boundary of scheduled code by one cycle.
Andrew Trickfc127d12013-12-07 05:59:44 +00002043void SchedBoundary::bumpCycle(unsigned NextCycle) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002044 if (SchedModel->getMicroOpBufferSize() == 0) {
2045 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
2046 if (MinReadyCycle > NextCycle)
2047 NextCycle = MinReadyCycle;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002048 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002049 // Update the current micro-ops, which will issue in the next cycle.
2050 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2051 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2052
2053 // Decrement DependentLatency based on the next cycle.
Andrew Trickf5b8ef22013-06-15 04:49:44 +00002054 if ((NextCycle - CurrCycle) > DependentLatency)
2055 DependentLatency = 0;
2056 else
2057 DependentLatency -= (NextCycle - CurrCycle);
Andrew Trick61f1a272012-05-24 22:11:09 +00002058
2059 if (!HazardRec->isEnabled()) {
Andrew Trick45446062012-06-05 21:11:27 +00002060 // Bypass HazardRec virtual calls.
Andrew Trick61f1a272012-05-24 22:11:09 +00002061 CurrCycle = NextCycle;
Matthias Braunb550b762016-04-21 01:54:13 +00002062 } else {
Andrew Trick45446062012-06-05 21:11:27 +00002063 // Bypass getHazardType calls in case of long latency.
Andrew Trick61f1a272012-05-24 22:11:09 +00002064 for (; CurrCycle != NextCycle; ++CurrCycle) {
2065 if (isTop())
2066 HazardRec->AdvanceCycle();
2067 else
2068 HazardRec->RecedeCycle();
2069 }
2070 }
2071 CheckPending = true;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002072 unsigned LFactor = SchedModel->getLatencyFactor();
2073 IsResourceLimited =
2074 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2075 > (int)LFactor;
Andrew Trick61f1a272012-05-24 22:11:09 +00002076
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002077 DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2078}
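
// E.g., with IssueWidth == 4, advancing from CurrCycle == 10 to
// NextCycle == 12 retires DecMOps = 4 * 2 = 8 micro-op issue slots (CurrMOps
// saturates at 0) and subtracts 2 from DependentLatency, likewise saturating
// at 0, before the hazard recognizer is advanced or receded cycle by cycle.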
2079
Andrew Trickfc127d12013-12-07 05:59:44 +00002080void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002081 ExecutedResCounts[PIdx] += Count;
2082 if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2083 MaxExecutedResCount = ExecutedResCounts[PIdx];
Andrew Trick61f1a272012-05-24 22:11:09 +00002084}
2085
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002086/// Add the given processor resource to this scheduled zone.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002087///
2088/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2089/// during which this resource is consumed.
2090///
2091/// \return the next cycle at which the instruction may execute without
2092/// oversubscribing resources.
Andrew Trickfc127d12013-12-07 05:59:44 +00002093unsigned SchedBoundary::
Andrew Trick5a22df42013-12-05 17:56:02 +00002094countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002095 unsigned Factor = SchedModel->getResourceFactor(PIdx);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002096 unsigned Count = Factor * Cycles;
Andrew Trickfc127d12013-12-07 05:59:44 +00002097 DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002098 << " +" << Cycles << "x" << Factor << "u\n");
2099
2100 // Update Executed resources counts.
2101 incExecutedResources(PIdx, Count);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002102 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2103 Rem->RemainingCounts[PIdx] -= Count;
2104
Andrew Trickb13ef172013-07-19 00:20:07 +00002105 // Check if this resource exceeds the current critical resource. If so, it
2106 // becomes the critical resource.
2107 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002108 ZoneCritResIdx = PIdx;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002109 DEBUG(dbgs() << " *** Critical resource "
Andrew Trickfc127d12013-12-07 05:59:44 +00002110 << SchedModel->getResourceName(PIdx) << ": "
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002111 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002112 }
Andrew Trick5a22df42013-12-05 17:56:02 +00002113 // For reserved resources, record the highest cycle using the resource.
2114 unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2115 if (NextAvailable > CurrCycle) {
2116 DEBUG(dbgs() << " Resource conflict: "
2117 << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2118 << NextAvailable << "\n");
2119 }
2120 return NextAvailable;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002121}
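
// E.g., if resource PIdx has Factor == 3 (resource counts are normalized so
// units of differently-sized resources are comparable) and the instruction
// occupies it for Cycles == 2, then Count == 6 normalized units are charged
// to this zone and deducted from Rem->RemainingCounts[PIdx]; if that lifts
// getResourceCount(PIdx) above the current critical count, PIdx becomes the
// zone's critical resource (ZoneCritResIdx).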
2122
/// Move the boundary of scheduled code by one SUnit.
void SchedBoundary::bumpNode(SUnit *SU) {
  // Update the reservation table.
  if (HazardRec->isEnabled()) {
    if (!isTop() && SU->isCall) {
      // Calls are scheduled with their preceding instructions. For bottom-up
      // scheduling, clear the pipeline state before emitting.
      HazardRec->Reset();
    }
    HazardRec->EmitInstruction(SU);
  }
  // checkHazard should prevent scheduling multiple instructions per cycle that
  // exceed the issue width.
  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
  assert(
      (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
      "Cannot schedule this instruction's MicroOps in the current cycle.");

  unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
  DEBUG(dbgs() << "  Ready @" << ReadyCycle << "c\n");

  unsigned NextCycle = CurrCycle;
  switch (SchedModel->getMicroOpBufferSize()) {
  case 0:
    assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
    break;
  case 1:
    if (ReadyCycle > NextCycle) {
      NextCycle = ReadyCycle;
      DEBUG(dbgs() << "  *** Stall until: " << ReadyCycle << "\n");
    }
    break;
  default:
    // We don't currently model the OOO reorder buffer, so consider all
    // scheduled MOps to be "retired". We do loosely model in-order resource
    // latency. If this instruction uses an in-order resource, account for any
    // likely stall cycles.
    if (SU->isUnbuffered && ReadyCycle > NextCycle)
      NextCycle = ReadyCycle;
    break;
  }
  RetiredMOps += IncMOps;

  // Update resource counts and critical resource.
  if (SchedModel->hasInstrSchedModel()) {
    unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
    assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
    Rem->RemIssueCount -= DecRemIssue;
    if (ZoneCritResIdx) {
      // Scale scheduled micro-ops for comparing with the critical resource.
      unsigned ScaledMOps =
        RetiredMOps * SchedModel->getMicroOpFactor();

      // If scaled micro-ops are now more than the previous critical resource by
      // a full cycle, then micro-ops issue becomes critical.
      if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
          >= (int)SchedModel->getLatencyFactor()) {
        ZoneCritResIdx = 0;
        DEBUG(dbgs() << "  *** Critical resource NumMicroOps: "
              << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
      }
    }
    for (TargetSchedModel::ProcResIter
           PI = SchedModel->getWriteProcResBegin(SC),
           PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
      unsigned RCycle =
        countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
      if (RCycle > NextCycle)
        NextCycle = RCycle;
    }
    if (SU->hasReservedResource) {
2195 // For reserved resources, record the highest cycle using the resource.
2196 // For top-down scheduling, this is the cycle in which we schedule this
2197 // instruction plus the number of cycles the operations reserves the
2198 // resource. For bottom-up is it simply the instruction's cycle.
      for (TargetSchedModel::ProcResIter
             PI = SchedModel->getWriteProcResBegin(SC),
             PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
        unsigned PIdx = PI->ProcResourceIdx;
        if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
          if (isTop()) {
            ReservedCycles[PIdx] =
              std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
          } else
            ReservedCycles[PIdx] = NextCycle;
        }
      }
    }
  }
  // Update ExpectedLatency and DependentLatency.
  unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
  unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
  if (SU->getDepth() > TopLatency) {
    TopLatency = SU->getDepth();
    DEBUG(dbgs() << "  " << Available.getName()
          << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
  }
  if (SU->getHeight() > BotLatency) {
    BotLatency = SU->getHeight();
    DEBUG(dbgs() << "  " << Available.getName()
          << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
  }
  // If we stall for any reason, bump the cycle.
  if (NextCycle > CurrCycle) {
    bumpCycle(NextCycle);
  } else {
    // After updating ZoneCritResIdx and ExpectedLatency, check if we're
    // resource limited. If a stall occurred, bumpCycle does this.
    unsigned LFactor = SchedModel->getLatencyFactor();
    IsResourceLimited =
      (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
      > (int)LFactor;
  }
  // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
  // resets CurrMOps. Loop to handle instructions with more MOps than can issue
  // in one cycle. Since we commonly reach the max MOps here, opportunistically
  // bump the cycle to avoid uselessly checking everything in the readyQ.
  CurrMOps += IncMOps;
  while (CurrMOps >= SchedModel->getIssueWidth()) {
    DEBUG(dbgs() << "  *** Max MOps " << CurrMOps
          << " at cycle " << CurrCycle << '\n');
    bumpCycle(++NextCycle);
  }
  DEBUG(dumpScheduledState());
}
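
// Worked example of the micro-op bookkeeping above (numbers invented for
// illustration, not taken from any real machine model): on a 2-wide machine
// (IssueWidth = 2), bumping a 3-micro-op instruction with CurrMOps == 0 passes
// the assert and leaves CurrMOps == 3. The loop above then advances the cycle
// once; bumpCycle resets CurrMOps as cycles advance, dropping it below the
// issue width, so the instruction fills the current cycle completely and
// spills one micro-op into the next one.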

/// Release pending ready nodes into the available queue. This makes them
/// visible to heuristics.
void SchedBoundary::releasePending() {
  // If the available queue is empty, it is safe to reset MinReadyCycle.
  if (Available.empty())
    MinReadyCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
  for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
    SUnit *SU = *(Pending.begin()+i);
    unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;

    if (ReadyCycle < MinReadyCycle)
      MinReadyCycle = ReadyCycle;

    if (!IsBuffered && ReadyCycle > CurrCycle)
      continue;

    if (checkHazard(SU))
      continue;

    if (Available.size() >= ReadyListLimit)
      break;

    Available.push(SU);
    Pending.remove(Pending.begin()+i);
    --i; --e;
  }
  CheckPending = false;
}

/// Remove SU from the ready set for this boundary.
void SchedBoundary::removeReady(SUnit *SU) {
  if (Available.isInQueue(SU))
    Available.remove(Available.find(SU));
  else {
    assert(Pending.isInQueue(SU) && "bad ready count");
    Pending.remove(Pending.find(SU));
  }
}

/// If this queue only has one ready candidate, return it. As a side effect,
/// defer any nodes that now hit a hazard, and advance the cycle until at least
/// one node is ready. If multiple instructions are ready, return nullptr.
SUnit *SchedBoundary::pickOnlyChoice() {
  if (CheckPending)
    releasePending();

  if (CurrMOps > 0) {
    // Defer any ready instrs that now have a hazard.
    for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
      if (checkHazard(*I)) {
        Pending.push(*I);
        I = Available.remove(I);
        continue;
      }
      ++I;
    }
  }
  for (unsigned i = 0; Available.empty(); ++i) {
//  FIXME: Re-enable assert once PR20057 is resolved.
//    assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
//           "permanent hazard");
    (void)i;
    bumpCycle(CurrCycle + 1);
    releasePending();
  }

  DEBUG(Pending.dump());
  DEBUG(Available.dump());

  if (Available.size() == 1)
    return *Available.begin();
  return nullptr;
}

#ifndef NDEBUG
// This is useful information to dump after bumpNode.
// Note that the Queue contents are more useful before pickNodeFromQueue.
void SchedBoundary::dumpScheduledState() {
  unsigned ResFactor;
  unsigned ResCount;
  if (ZoneCritResIdx) {
    ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
    ResCount = getResourceCount(ZoneCritResIdx);
  } else {
    ResFactor = SchedModel->getMicroOpFactor();
    ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
  }
  unsigned LFactor = SchedModel->getLatencyFactor();
  dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
         << "  Retired: " << RetiredMOps;
  dbgs() << "\n  Executed: " << getExecutedCount() / LFactor << "c";
  dbgs() << "\n  Critical: " << ResCount / LFactor << "c, "
         << ResCount / ResFactor << " "
         << SchedModel->getResourceName(ZoneCritResIdx)
         << "\n  ExpectedLatency: " << ExpectedLatency << "c\n"
         << (IsResourceLimited ? "  - Resource" : "  - Latency")
         << " limited.\n";
}
#endif
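
// With the stream inserters above, a dump might look like the following
// (queue name, resource name, and all counts are invented for illustration):
//   BotQ.A @6c
//     Retired: 12
//     Executed: 6c
//     Critical: 8c, 4 FPDiv
//     ExpectedLatency: 9c
//     - Latency limited.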

//===----------------------------------------------------------------------===//
// GenericScheduler - Generic implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void GenericSchedulerBase::SchedCandidate::
initResourceDelta(const ScheduleDAGMI *DAG,
                  const TargetSchedModel *SchedModel) {
  if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
    return;

  const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
  for (TargetSchedModel::ProcResIter
         PI = SchedModel->getWriteProcResBegin(SC),
         PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
    if (PI->ProcResourceIdx == Policy.ReduceResIdx)
      ResDelta.CritResources += PI->Cycles;
    if (PI->ProcResourceIdx == Policy.DemandResIdx)
      ResDelta.DemandedResources += PI->Cycles;
  }
}

/// Set the CandPolicy for a scheduling zone given the current resources and
/// latencies inside and outside the zone.
void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
                                     SchedBoundary &CurrZone,
                                     SchedBoundary *OtherZone) {
  // Apply preemptive heuristics based on the total latency and resources
  // inside and outside this zone. Potential stalls should be considered before
  // following this policy.

  // Compute remaining latency. We need this both to determine whether the
  // overall schedule has become latency-limited and whether the instructions
  // outside this zone are resource or latency limited.
  //
  // The "dependent" latency is updated incrementally during scheduling as the
  // max height/depth of scheduled nodes minus the cycles since it was
  // scheduled:
  //   DLat = max(N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
  //
  // The "independent" latency is the max ready queue depth:
  //   ILat = max(N.depth) for N in Available|Pending
  //
  // RemainingLatency is the greater of independent and dependent latency.
  unsigned RemLatency = CurrZone.getDependentLatency();
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Available.elements()));
  RemLatency = std::max(RemLatency,
                        CurrZone.findMaxLatency(CurrZone.Pending.elements()));

  // Compute the critical resource outside the zone.
  unsigned OtherCritIdx = 0;
  unsigned OtherCount =
    OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;

  bool OtherResLimited = false;
  if (SchedModel->hasInstrSchedModel()) {
    unsigned LFactor = SchedModel->getLatencyFactor();
    OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
  }
  // Schedule aggressively for latency in PostRA mode. We don't check for
  // acyclic latency during PostRA, and highly out-of-order processors will
  // skip PostRA scheduling.
  if (!OtherResLimited) {
    if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
      Policy.ReduceLatency |= true;
      DEBUG(dbgs() << "  " << CurrZone.Available.getName()
            << " RemainingLatency " << RemLatency << " + "
            << CurrZone.getCurrCycle() << "c > CritPath "
            << Rem.CriticalPath << "\n");
    }
  }
  // If the same resource is limiting inside and outside the zone, do nothing.
  if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
    return;

  DEBUG(
    if (CurrZone.isResourceLimited()) {
      dbgs() << "  " << CurrZone.Available.getName() << " ResourceLimited: "
             << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
             << "\n";
    }
    if (OtherResLimited)
      dbgs() << "  RemainingLimit: "
             << SchedModel->getResourceName(OtherCritIdx) << "\n";
    if (!CurrZone.isResourceLimited() && !OtherResLimited)
      dbgs() << "  Latency limited both directions.\n");

  if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
    Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();

  if (OtherResLimited)
    Policy.DemandResIdx = OtherCritIdx;
}
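
// Illustrative numbers for the resource-limited test above (assumed, not from
// any real machine model): with LatencyFactor = 2, OtherCount = 24 scaled
// units, and RemLatency = 10 cycles, 24 - 10*2 = 4 > 2, so the instructions
// outside the current zone are resource limited and Policy.DemandResIdx is set
// to that critical resource. With OtherCount = 20 instead, 20 - 20 = 0 <= 2,
// and the latency check above may set Policy.ReduceLatency when
// RemLatency + CurrCycle exceeds the critical path.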

#ifndef NDEBUG
const char *GenericSchedulerBase::getReasonStr(
  GenericSchedulerBase::CandReason Reason) {
  switch (Reason) {
  case NoCand:         return "NOCAND    ";
  case Only1:          return "ONLY1     ";
  case PhysRegCopy:    return "PREG-COPY ";
  case RegExcess:      return "REG-EXCESS";
  case RegCritical:    return "REG-CRIT  ";
  case Stall:          return "STALL     ";
  case Cluster:        return "CLUSTER   ";
  case Weak:           return "WEAK      ";
  case RegMax:         return "REG-MAX   ";
  case ResourceReduce: return "RES-REDUCE";
  case ResourceDemand: return "RES-DEMAND";
  case TopDepthReduce: return "TOP-DEPTH ";
  case TopPathReduce:  return "TOP-PATH  ";
  case BotHeightReduce:return "BOT-HEIGHT";
  case BotPathReduce:  return "BOT-PATH  ";
  case NextDefUse:     return "DEF-USE   ";
  case NodeOrder:      return "ORDER     ";
  };
  llvm_unreachable("Unknown reason!");
}

void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
  PressureChange P;
  unsigned ResIdx = 0;
  unsigned Latency = 0;
  switch (Cand.Reason) {
  default:
    break;
  case RegExcess:
    P = Cand.RPDelta.Excess;
    break;
  case RegCritical:
    P = Cand.RPDelta.CriticalMax;
    break;
  case RegMax:
    P = Cand.RPDelta.CurrentMax;
    break;
  case ResourceReduce:
    ResIdx = Cand.Policy.ReduceResIdx;
    break;
  case ResourceDemand:
    ResIdx = Cand.Policy.DemandResIdx;
    break;
  case TopDepthReduce:
    Latency = Cand.SU->getDepth();
    break;
  case TopPathReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotHeightReduce:
    Latency = Cand.SU->getHeight();
    break;
  case BotPathReduce:
    Latency = Cand.SU->getDepth();
    break;
  }
  dbgs() << "  Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
  if (P.isValid())
    dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
           << ":" << P.getUnitInc() << " ";
  else
    dbgs() << "      ";
  if (ResIdx)
    dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
  else
    dbgs() << "         ";
  if (Latency)
    dbgs() << " " << Latency << " cycles ";
  else
    dbgs() << "          ";
  dbgs() << '\n';
}
#endif

/// Return true if this heuristic determines order.
static bool tryLess(int TryVal, int CandVal,
                    GenericSchedulerBase::SchedCandidate &TryCand,
                    GenericSchedulerBase::SchedCandidate &Cand,
                    GenericSchedulerBase::CandReason Reason) {
  if (TryVal < CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal > CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}

static bool tryGreater(int TryVal, int CandVal,
                       GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       GenericSchedulerBase::CandReason Reason) {
  if (TryVal > CandVal) {
    TryCand.Reason = Reason;
    return true;
  }
  if (TryVal < CandVal) {
    if (Cand.Reason > Reason)
      Cand.Reason = Reason;
    return true;
  }
  return false;
}
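
// The helpers above implement a three-way protocol: returning true after
// setting TryCand.Reason means TryCand wins; returning true without setting it
// means Cand wins (Cand.Reason is merely upgraded to the stronger reason if
// needed); returning false means this heuristic cannot decide and the caller
// falls through to the next one. For example, tryGreater(3, 1, TryCand, Cand,
// Cluster) picks TryCand with Reason == Cluster, tryGreater(1, 3, ...) picks
// Cand, and tryGreater(2, 2, ...) defers the decision.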

static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
                       GenericSchedulerBase::SchedCandidate &Cand,
                       SchedBoundary &Zone) {
  if (Zone.isTop()) {
    if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                  TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                   TryCand, Cand, GenericSchedulerBase::TopPathReduce))
      return true;
  } else {
    if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
      if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                  TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
        return true;
    }
    if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
                   TryCand, Cand, GenericSchedulerBase::BotPathReduce))
      return true;
  }
  return false;
}

static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
  DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
        << GenericSchedulerBase::getReasonStr(Reason) << '\n');
}

static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
  tracePick(Cand.Reason, Cand.AtTop);
}

void GenericScheduler::initialize(ScheduleDAGMI *dag) {
  assert(dag->hasVRegLiveness() &&
         "(PreRA)GenericScheduler needs vreg liveness");
  DAG = static_cast<ScheduleDAGMILive*>(dag);
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  Bot.init(DAG, SchedModel, &Rem);

  // Initialize resource counts.

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
  // are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  if (!Bot.HazardRec) {
    Bot.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
  TopCand.SU = nullptr;
  BotCand.SU = nullptr;
}
2623
2624/// Initialize the per-region scheduling policy.
2625void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2626 MachineBasicBlock::iterator End,
2627 unsigned NumRegionInstrs) {
Eric Christopher99556d72014-10-14 06:56:25 +00002628 const MachineFunction &MF = *Begin->getParent()->getParent();
2629 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
Andrew Trickfc127d12013-12-07 05:59:44 +00002630
2631 // Avoid setting up the register pressure tracker for small regions to save
2632 // compile time. As a rough heuristic, only track pressure when the number of
2633 // schedulable instructions exceeds half the integer register file.
Andrew Trick350ff2c2014-01-21 21:27:37 +00002634 RegionPolicy.ShouldTrackPressure = true;
Andrew Trick46753512014-01-22 03:38:55 +00002635 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2636 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2637 if (TLI->isTypeLegal(LegalIntVT)) {
Andrew Trick350ff2c2014-01-21 21:27:37 +00002638 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
Andrew Trick46753512014-01-22 03:38:55 +00002639 TLI->getRegClassFor(LegalIntVT));
Andrew Trick350ff2c2014-01-21 21:27:37 +00002640 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2641 }
2642 }
Andrew Trickfc127d12013-12-07 05:59:44 +00002643
2644 // For generic targets, we default to bottom-up, because it's simpler and more
2645 // compile-time optimizations have been implemented in that direction.
2646 RegionPolicy.OnlyBottomUp = true;
2647
2648 // Allow the subtarget to override default policy.
Duncan P. N. Exon Smith63298722016-07-01 00:23:27 +00002649 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
Andrew Trickfc127d12013-12-07 05:59:44 +00002650
2651 // After subtarget overrides, apply command line options.
2652 if (!EnableRegPressure)
2653 RegionPolicy.ShouldTrackPressure = false;
2654
  // Check whether -misched-topdown/bottomup can force or unforce the
  // scheduling direction. e.g. -misched-bottomup=false allows scheduling in
  // both directions.
  assert((!ForceTopDown || !ForceBottomUp) &&
         "-misched-topdown incompatible with -misched-bottomup");
  if (ForceBottomUp.getNumOccurrences() > 0) {
    RegionPolicy.OnlyBottomUp = ForceBottomUp;
    if (RegionPolicy.OnlyBottomUp)
      RegionPolicy.OnlyTopDown = false;
  }
  if (ForceTopDown.getNumOccurrences() > 0) {
    RegionPolicy.OnlyTopDown = ForceTopDown;
    if (RegionPolicy.OnlyTopDown)
      RegionPolicy.OnlyBottomUp = false;
  }
}
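
// Sketch of a subtarget exercising the override point above. The hook
// signature matches the call in initPolicy; the target name and the chosen
// policy values are hypothetical:
//
//   void MyTargetSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
//                                               unsigned NumRegionInstrs) const {
//     Policy.ShouldTrackPressure = true;  // always pay for pressure tracking
//     if (NumRegionInstrs < 8) {          // schedule tiny regions top-down
//       Policy.OnlyTopDown = true;
//       Policy.OnlyBottomUp = false;
//     }
//   }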

void GenericScheduler::dumpPolicy() {
  dbgs() << "GenericScheduler RegionPolicy: "
         << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
         << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
         << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
         << "\n";
}

/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
/// critical path by more cycles than it takes to drain the instruction buffer.
/// We estimate an upper bound on in-flight instructions as:
///
///   CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
///   InFlightIterations = AcyclicPath / CyclesPerIteration
///   InFlightResources = InFlightIterations * LoopResources
///
/// TODO: Check execution resources in addition to IssueCount.
void GenericScheduler::checkAcyclicLatency() {
  if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
    return;

  // Scaled number of cycles per loop iteration.
  unsigned IterCount =
    std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
             Rem.RemIssueCount);
  // Scaled acyclic critical path.
  unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
  // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
  unsigned InFlightCount =
    (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
  unsigned BufferLimit =
    SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();

  Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;

  DEBUG(dbgs() << "IssueCycles="
        << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
        << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
        << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
        << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
        << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
        if (Rem.IsAcyclicLatencyLimited)
          dbgs() << "  ACYCLIC LATENCY LIMIT\n");
}
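
// Worked example with invented numbers: LatencyFactor = 1, MicroOpFactor = 1,
// CyclicCritPath = 4, CriticalPath = 24, RemIssueCount = 8, and
// MicroOpBufferSize = 16. Then IterCount = max(4*1, 8) = 8, AcyclicCount = 24,
// InFlightCount = (24*8 + 7)/8 = 24, and BufferLimit = 16*1 = 16. Since
// 24 > 16, the region is flagged acyclic-latency limited and tryCandidate
// will schedule aggressively for latency.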

void GenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them just in case.
  for (std::vector<SUnit*>::const_iterator
         I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
  }

  if (EnableCyclicPath) {
    Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
    checkAcyclicLatency();
  }
}

static bool tryPressure(const PressureChange &TryP,
                        const PressureChange &CandP,
                        GenericSchedulerBase::SchedCandidate &TryCand,
                        GenericSchedulerBase::SchedCandidate &Cand,
                        GenericSchedulerBase::CandReason Reason,
                        const TargetRegisterInfo *TRI,
                        const MachineFunction &MF) {
  // If one candidate decreases and the other increases, go with it.
  // Invalid candidates have UnitInc==0.
  if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
                 Reason)) {
    return true;
  }
  // Do not compare the magnitude of pressure changes between top and bottom
  // boundary.
  if (Cand.AtTop != TryCand.AtTop)
    return false;

  // If both candidates affect the same set in the same boundary, go with the
  // smallest increase.
  unsigned TryPSet = TryP.getPSetOrMax();
  unsigned CandPSet = CandP.getPSetOrMax();
  if (TryPSet == CandPSet) {
    return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
                   Reason);
  }

  int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
                std::numeric_limits<int>::max();

  int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
                 std::numeric_limits<int>::max();

  // If the candidates are decreasing pressure, reverse priority.
  if (TryP.getUnitInc() < 0)
    std::swap(TryRank, CandRank);
  return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
}
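
// Example with invented pressure deltas: if TryCand changes a set by -1 unit
// while Cand changes one by +2, the sign test above immediately prefers
// TryCand. If both increase the same set (+1 vs. +3) within one boundary, the
// smaller increase wins via tryLess. Only when they touch different sets does
// the target-provided getRegPressureSetScore rank the sets, with the ranks
// swapped when the candidates are decreasing pressure.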

static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
  return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
}

/// Minimize physical register live ranges. Regalloc wants them adjacent to
/// their physreg def/use.
///
/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
/// with the operation that produces or consumes the physreg. We'll do this when
/// regalloc has support for parallel copies.
static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
  const MachineInstr *MI = SU->getInstr();
  if (!MI->isCopy())
    return 0;

  unsigned ScheduledOper = isTop ? 1 : 0;
  unsigned UnscheduledOper = isTop ? 0 : 1;
  // If we have already scheduled the physreg producer/consumer, immediately
  // schedule the copy.
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(ScheduledOper).getReg()))
    return 1;
  // If the physreg is at the boundary, defer it. Otherwise schedule it
  // immediately to free the dependent. We can hoist the copy later.
  bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
  if (TargetRegisterInfo::isPhysicalRegister(
        MI->getOperand(UnscheduledOper).getReg()))
    return AtBoundary ? -1 : 1;
  return 0;
}

void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop,
                                     const RegPressureTracker &RPTracker,
                                     RegPressureTracker &TempTracker) {
  Cand.SU = SU;
  Cand.AtTop = AtTop;
  if (DAG->isTrackingPressure()) {
    if (AtTop) {
      TempTracker.getMaxDownwardPressureDelta(
        Cand.SU->getInstr(),
        Cand.RPDelta,
        DAG->getRegionCriticalPSets(),
        DAG->getRegPressure().MaxSetPressure);
    } else {
      if (VerifyScheduling) {
        TempTracker.getMaxUpwardPressureDelta(
          Cand.SU->getInstr(),
          &DAG->getPressureDiff(Cand.SU),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      } else {
        RPTracker.getUpwardPressureDelta(
          Cand.SU->getInstr(),
          DAG->getPressureDiff(Cand.SU),
          Cand.RPDelta,
          DAG->getRegionCriticalPSets(),
          DAG->getRegPressure().MaxSetPressure);
      }
    }
  }
  DEBUG(if (Cand.RPDelta.Excess.isValid())
          dbgs() << "  Try  SU(" << Cand.SU->NodeNum << ") "
                 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
                 << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
}

/// Apply a set of heuristics to a new candidate. Heuristics are currently
/// hierarchical. This may be more efficient than a graduated cost model because
/// we don't need to evaluate all aspects of the model for each node in the
/// queue. But it's really done to make the heuristics easier to debug and
/// statistically analyze.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
/// \param Zone describes the scheduled zone that we are extending, or nullptr
///        if Cand is from a different zone than TryCand.
void GenericScheduler::tryCandidate(SchedCandidate &Cand,
                                    SchedCandidate &TryCand,
                                    SchedBoundary *Zone) {
  // Initialize the candidate if needed.
  if (!Cand.isValid()) {
    TryCand.Reason = NodeOrder;
    return;
  }

  if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
                 biasPhysRegCopy(Cand.SU, Cand.AtTop),
                 TryCand, Cand, PhysRegCopy))
    return;

  // Avoid exceeding the target's limit.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
                                               Cand.RPDelta.Excess,
                                               TryCand, Cand, RegExcess, TRI,
                                               DAG->MF))
    return;

  // Avoid increasing the max critical pressure in the scheduled region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
                                               Cand.RPDelta.CriticalMax,
                                               TryCand, Cand, RegCritical, TRI,
                                               DAG->MF))
    return;

  // We only compare a subset of features when comparing nodes between
  // Top and Bottom boundary. Some properties are simply incomparable; in many
  // other instances we should only override the other boundary if something
  // is a clear good pick on one boundary. Skip heuristics that are more
  // "tie-breaking" in nature.
  bool SameBoundary = Zone != nullptr;
  if (SameBoundary) {
    // For loops that are acyclic path limited, aggressively schedule for
    // latency. Within a single cycle, whenever CurrMOps > 0, allow normal
    // heuristics to take precedence.
    if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
        tryLatency(TryCand, Cand, *Zone))
      return;

    // Prioritize instructions that read unbuffered resources by stall cycles.
    if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
                Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
      return;
  }

  // Keep clustered nodes together to encourage downstream peephole
  // optimizations which may reduce resource requirements.
  //
  // This is a best effort to set things up for a post-RA pass. Optimizations
  // like generating loads of multiple registers should ideally be done within
  // the scheduler pass by combining the loads during DAG postprocessing.
  const SUnit *CandNextClusterSU =
    Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  const SUnit *TryCandNextClusterSU =
    TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
  if (tryGreater(TryCand.SU == TryCandNextClusterSU,
                 Cand.SU == CandNextClusterSU,
                 TryCand, Cand, Cluster))
    return;

  if (SameBoundary) {
    // Weak edges are for clustering and other constraints.
    if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
                getWeakLeft(Cand.SU, Cand.AtTop),
                TryCand, Cand, Weak))
      return;
  }

  // Avoid increasing the max pressure of the entire region.
  if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
                                               Cand.RPDelta.CurrentMax,
                                               TryCand, Cand, RegMax, TRI,
                                               DAG->MF))
    return;

  if (SameBoundary) {
    // Avoid critical resource consumption and balance the schedule.
    TryCand.initResourceDelta(DAG, SchedModel);
    if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
                TryCand, Cand, ResourceReduce))
      return;
    if (tryGreater(TryCand.ResDelta.DemandedResources,
                   Cand.ResDelta.DemandedResources,
                   TryCand, Cand, ResourceDemand))
      return;

    // Avoid serializing long latency dependence chains.
    // For acyclic path limited loops, latency was already checked above.
    if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
        !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
      return;

    // Fall through to original instruction order.
    if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
        || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
      TryCand.Reason = NodeOrder;
    }
  }
}
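
// To summarize, the pre-RA heuristics above fire in this priority order:
// physreg copy bias, excess register pressure, critical-set pressure,
// acyclic-latency (same boundary only), unbuffered-resource stalls (same
// boundary only), load/store clustering, weak edges (same boundary only),
// whole-region max pressure, resource reduction/demand (same boundary only),
// the general latency heuristic (same boundary only), and finally the
// original instruction order as the tie breaker.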

/// Pick the best candidate from the queue.
///
/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
/// DAG building. To adjust for the current scheduling location we need to
/// maintain the number of vreg uses remaining to be top-scheduled.
void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  // getMaxPressureDelta temporarily modifies the tracker.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  ReadyQueue &Q = Zone.Available;
  for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {

    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(DAG, SchedModel);
      Cand.setBest(TryCand);
      DEBUG(traceCandidate(Cand));
    }
  }
}

/// Pick the best candidate node from either the top or bottom queue.
SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    tracePick(Only1, false);
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    tracePick(Only1, true);
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    DEBUG(traceCandidate(BotCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
      assert(TCand.SU == BotCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Check if the top Q has a better candidate.
  DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    DEBUG(traceCandidate(TopCand));
#ifndef NDEBUG
    if (VerifyScheduling) {
      SchedCandidate TCand;
      TCand.reset(CandPolicy());
      pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
      assert(TCand.SU == TopCand.SU &&
             "Last pick result should correspond to re-picking right now");
    }
#endif
  }

  // Pick best from BotCand and TopCand.
  assert(BotCand.isValid());
  assert(TopCand.isValid());
  SchedCandidate Cand = BotCand;
  TopCand.Reason = NoCand;
  tryCandidate(Cand, TopCand, nullptr);
  if (TopCand.Reason != NoCand) {
    Cand.setBest(TopCand);
    DEBUG(traceCandidate(Cand));
  }

  IsTopNode = Cand.AtTop;
  tracePick(Cand);
  return Cand.SU;
}

/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        tracePick(TopCand);
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        tracePick(BotCand);
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}

void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
  MachineBasicBlock::iterator InsertPos = SU->getInstr();
  if (!isTop)
    ++InsertPos;
  SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;

  // Find already scheduled copies with a single physreg dependence and move
  // them just above the scheduled instruction.
  for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
       I != E; ++I) {
    if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
      continue;
    SUnit *DepSU = I->getSUnit();
    if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
      continue;
    MachineInstr *Copy = DepSU->getInstr();
    if (!Copy->isCopy())
      continue;
    DEBUG(dbgs() << "  Rescheduling physreg copy ";
          I->getSUnit()->dump(DAG));
    DAG->moveInstruction(Copy, InsertPos);
  }
}

/// Update the scheduler's state after scheduling a node. This is the same node
/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
/// update its state based on the current cycle before MachineSchedStrategy
/// does.
///
/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
/// them here. See comments in biasPhysRegCopy.
void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
  if (IsTopNode) {
    SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
    Top.bumpNode(SU);
    if (SU->hasPhysRegUses)
      reschedulePhysRegCopies(SU, true);
  } else {
    SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
    Bot.bumpNode(SU);
    if (SU->hasPhysRegDefs)
      reschedulePhysRegCopies(SU, false);
  }
}

/// Create the standard converging machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
  ScheduleDAGMILive *DAG =
      new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
  // Register DAG post-processors.
  //
  // FIXME: extend the mutation API to allow earlier mutations to instantiate
  // data and pass it to later mutations. Have a single mutation that gathers
  // the interesting nodes in one pass.
  DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
  if (EnableMemOpCluster) {
    if (DAG->TII->enableClusterLoads())
      DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
    if (DAG->TII->enableClusterStores())
      DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
  }
  if (EnableMacroFusion)
    DAG->addMutation(createMacroFusionDAGMutation(DAG->TII));
  return DAG;
}

static MachineSchedRegistry
GenericSchedRegistry("converge", "Standard converging scheduler.",
                     createGenericSchedLive);
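
// A custom strategy can be exposed the same way. Sketch, with a hypothetical
// MyStrategy class implementing MachineSchedStrategy:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, make_unique<MyStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Experimental strategy.", createMySched);
//
// The registered name then becomes selectable via -misched=my-sched.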

//===----------------------------------------------------------------------===//
// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
//===----------------------------------------------------------------------===//

void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
  DAG = Dag;
  SchedModel = DAG->getSchedModel();
  TRI = DAG->TRI;

  Rem.init(DAG, SchedModel);
  Top.init(DAG, SchedModel, &Rem);
  BotRoots.clear();

  // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
  // or are disabled, then these HazardRecs will be disabled.
  const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
  if (!Top.HazardRec) {
    Top.HazardRec =
        DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
            Itin, DAG);
  }
}

void PostGenericScheduler::registerRoots() {
  Rem.CriticalPath = DAG->ExitSU.getDepth();

  // Some roots may not feed into ExitSU. Check all of them just in case.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
    if ((*I)->getDepth() > Rem.CriticalPath)
      Rem.CriticalPath = (*I)->getDepth();
  }
  DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
  if (DumpCriticalPathLength) {
    errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
  }
}

/// Apply a set of heuristics to a new candidate for PostRA scheduling.
///
/// \param Cand provides the policy and current best candidate.
/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3225void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3226 SchedCandidate &TryCand) {
3227
3228 // Initialize the candidate if needed.
3229 if (!Cand.isValid()) {
3230 TryCand.Reason = NodeOrder;
3231 return;
3232 }
3233
3234 // Prioritize instructions that read unbuffered resources by stall cycles.
3235 if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3236 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3237 return;
3238
3239 // Avoid critical resource consumption and balance the schedule.
3240 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3241 TryCand, Cand, ResourceReduce))
3242 return;
3243 if (tryGreater(TryCand.ResDelta.DemandedResources,
3244 Cand.ResDelta.DemandedResources,
3245 TryCand, Cand, ResourceDemand))
3246 return;
3247
3248 // Avoid serializing long latency dependence chains.
3249 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3250 return;
3251 }
3252
3253 // Fall through to original instruction order.
3254 if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3255 TryCand.Reason = NodeOrder;
3256}
3257
3258void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3259 ReadyQueue &Q = Top.Available;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003260 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3261 SchedCandidate TryCand(Cand.Policy);
3262 TryCand.SU = *I;
Matthias Braun6ad3d052016-06-25 00:23:00 +00003263 TryCand.AtTop = true;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003264 TryCand.initResourceDelta(DAG, SchedModel);
3265 tryCandidate(Cand, TryCand);
3266 if (TryCand.Reason != NoCand) {
3267 Cand.setBest(TryCand);
3268 DEBUG(traceCandidate(Cand));
3269 }
3270 }
3271}
3272
3273/// Pick the next node to schedule.
3274SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3275 if (DAG->top() == DAG->bottom()) {
3276 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
Craig Topperc0196b12014-04-14 00:51:57 +00003277 return nullptr;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003278 }
3279 SUnit *SU;
3280 do {
3281 SU = Top.pickOnlyChoice();
Matthias Braun49cb6e92016-05-27 22:14:26 +00003282 if (SU) {
3283 tracePick(Only1, true);
3284 } else {
Andrew Trickd14d7c22013-12-28 21:56:57 +00003285 CandPolicy NoPolicy;
3286 SchedCandidate TopCand(NoPolicy);
3287 // Set the top-down policy based on the state of the current top zone and
3288 // the instructions outside the zone, including the bottom zone.
Craig Topperc0196b12014-04-14 00:51:57 +00003289 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003290 pickNodeFromQueue(TopCand);
3291 assert(TopCand.Reason != NoCand && "failed to find a candidate");
Matthias Braun6ad3d052016-06-25 00:23:00 +00003292 tracePick(TopCand);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003293 SU = TopCand.SU;
3294 }
3295 } while (SU->isScheduled);
3296
3297 IsTopNode = true;
3298 Top.removeReady(SU);
3299
3300 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3301 return SU;
3302}
3303
3304/// Called after ScheduleDAGMI has scheduled an instruction and updated
3305/// scheduled/remaining flags in the DAG nodes.
3306void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3307 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3308 Top.bumpNode(SU);
3309}
3310
3311/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3312static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
Jonas Paulsson28f29482016-11-09 09:59:27 +00003313 return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C),
3314 /*RemoveKillFlags=*/true);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003315}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
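
// With this less-than relation driving the std::push_heap/pop_heap calls in
// ILPScheduler below, ReadyQ behaves as a max-heap: the node that compares
// greatest surfaces first. For example, under MaximizeILP, two nodes in the
// same scheduled subtree with ILP counts 3 and 5 compare (3 < 5), so the
// ILP-5 node reaches the heap front and is picked first.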

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. Marking newly scheduled trees and
  /// resorting the priority Q happen via the scheduleTree() callback above;
  /// here we only check that scheduling proceeds bottom-up, which
  /// SchedDFSResult requires.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

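// These registries make the experimental strategies selectable by name, e.g.
// (a sketch, assuming an llc invocation with the machine scheduler enabled):
//
//   llc -enable-misched -misched=ilpmax foo.ll
//
// which installs createILPMaxScheduler as the DAG constructor per region.
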
//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
#endif // !NDEBUG

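// Being registered only under !NDEBUG, the shuffler is a stress-testing aid
// for asserts-enabled builds; a typical invocation (sketch) is:
//
//   llc -enable-misched -misched=shuffle foo.ll
//
// optionally pinned to one direction with -misched-topdown or
// -misched-bottomup.
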
//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
            || Node->Succs.size() > ViewMISchedCutoff);
  }

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG
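
// As a concrete sketch of the rendering: a node for SU 42 in a 3-instruction
// subtree is labeled "SU:42 I:3" by getNodeLabel() above, and when DFS
// results exist its record is filled with a color keyed to the subtree ID;
// artificial edges render cyan and control dependences blue.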

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

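// From a debugger this pairs nicely with the no-argument overload below,
// e.g. at a breakpoint inside the scheduler (gdb syntax, assuming a local
// ScheduleDAGMI *DAG): call DAG->viewGraph()
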
/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
3605}