//===- MachineScheduler.cpp - Machine Instruction Scheduler ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// MachineScheduler schedules machine instructions after phi elimination. It
// preserves LiveIntervals so it can be invoked before register allocation.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"

using namespace llvm;

#define DEBUG_TYPE "misched"

namespace llvm {
cl::opt<bool> ForceTopDown("misched-topdown", cl::Hidden,
                           cl::desc("Force top-down list scheduling"));
cl::opt<bool> ForceBottomUp("misched-bottomup", cl::Hidden,
                            cl::desc("Force bottom-up list scheduling"));
cl::opt<bool>
DumpCriticalPathLength("misched-dcpl", cl::Hidden,
                       cl::desc("Print critical path length to stdout"));
}

#ifndef NDEBUG
static cl::opt<bool> ViewMISchedDAGs("view-misched-dags", cl::Hidden,
  cl::desc("Pop up a window to show MISched dags after they are processed"));

/// In some situations a few uninteresting nodes depend on nearly all other
/// nodes in the graph; provide a cutoff to hide them.
static cl::opt<unsigned> ViewMISchedCutoff("view-misched-cutoff", cl::Hidden,
  cl::desc("Hide nodes with more predecessors/successors than cutoff"));

static cl::opt<unsigned> MISchedCutoff("misched-cutoff", cl::Hidden,
  cl::desc("Stop scheduling after N instructions"), cl::init(~0U));

static cl::opt<std::string> SchedOnlyFunc("misched-only-func", cl::Hidden,
  cl::desc("Only schedule this function"));
static cl::opt<unsigned> SchedOnlyBlock("misched-only-block", cl::Hidden,
  cl::desc("Only schedule this MBB#"));
#else
static bool ViewMISchedDAGs = false;
#endif // NDEBUG

/// Avoid quadratic complexity in unusually large basic blocks by limiting the
/// size of the ready lists.
static cl::opt<unsigned> ReadyListLimit("misched-limit", cl::Hidden,
  cl::desc("Limit ready list to N instructions"), cl::init(256));

static cl::opt<bool> EnableRegPressure("misched-regpressure", cl::Hidden,
  cl::desc("Enable register pressure scheduling."), cl::init(true));

static cl::opt<bool> EnableCyclicPath("misched-cyclicpath", cl::Hidden,
  cl::desc("Enable cyclic critical path analysis."), cl::init(true));

static cl::opt<bool> EnableMemOpCluster("misched-cluster", cl::Hidden,
                                        cl::desc("Enable memop clustering."),
                                        cl::init(true));

// Experimental heuristics
static cl::opt<bool> EnableMacroFusion("misched-fusion", cl::Hidden,
  cl::desc("Enable scheduling for macro fusion."), cl::init(true));

static cl::opt<bool> VerifyScheduling("verify-misched", cl::Hidden,
  cl::desc("Verify machine instrs before and after machine scheduling"));

// DAG subtrees must have at least this many nodes.
static const unsigned MinSubtreeSize = 8;

// Pin the vtables to this file.
void MachineSchedStrategy::anchor() {}
void ScheduleDAGMutation::anchor() {}

//===----------------------------------------------------------------------===//
// Machine Instruction Scheduling Pass and Registry
//===----------------------------------------------------------------------===//

MachineSchedContext::MachineSchedContext():
    MF(nullptr), MLI(nullptr), MDT(nullptr), PassConfig(nullptr), AA(nullptr),
    LIS(nullptr) {
  RegClassInfo = new RegisterClassInfo();
}

MachineSchedContext::~MachineSchedContext() {
  delete RegClassInfo;
}

namespace {
/// Base class for a machine scheduler class that can run at any point.
class MachineSchedulerBase : public MachineSchedContext,
                             public MachineFunctionPass {
public:
  MachineSchedulerBase(char &ID): MachineFunctionPass(ID) {}

  void print(raw_ostream &O, const Module* = nullptr) const override;

protected:
  void scheduleRegions(ScheduleDAGInstrs &Scheduler, bool FixKillFlags);
};

/// MachineScheduler runs after coalescing and before register allocation.
class MachineScheduler : public MachineSchedulerBase {
public:
  MachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createMachineScheduler();
};

/// PostMachineScheduler runs shortly before code emission.
class PostMachineScheduler : public MachineSchedulerBase {
public:
  PostMachineScheduler();

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction&) override;

  static char ID; // Class identification, replacement for typeinfo

protected:
  ScheduleDAGInstrs *createPostMachineScheduler();
};
} // namespace

char MachineScheduler::ID = 0;

char &llvm::MachineSchedulerID = MachineScheduler::ID;

INITIALIZE_PASS_BEGIN(MachineScheduler, "machine-scheduler",
                      "Machine Instruction Scheduler", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_END(MachineScheduler, "machine-scheduler",
                    "Machine Instruction Scheduler", false, false)

MachineScheduler::MachineScheduler()
: MachineSchedulerBase(ID) {
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void MachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

char PostMachineScheduler::ID = 0;

char &llvm::PostMachineSchedulerID = PostMachineScheduler::ID;

INITIALIZE_PASS(PostMachineScheduler, "postmisched",
                "PostRA Machine Instruction Scheduler", false, false)

PostMachineScheduler::PostMachineScheduler()
: MachineSchedulerBase(ID) {
  initializePostMachineSchedulerPass(*PassRegistry::getPassRegistry());
}

void PostMachineScheduler::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequiredID(MachineDominatorsID);
  AU.addRequired<MachineLoopInfo>();
  AU.addRequired<TargetPassConfig>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

MachinePassRegistry MachineSchedRegistry::Registry;

/// A dummy default scheduler factory indicates whether the scheduler
/// is overridden on the command line.
static ScheduleDAGInstrs *useDefaultMachineSched(MachineSchedContext *C) {
  return nullptr;
}

/// MachineSchedOpt allows command line selection of the scheduler.
static cl::opt<MachineSchedRegistry::ScheduleDAGCtor, false,
               RegisterPassParser<MachineSchedRegistry> >
MachineSchedOpt("misched",
                cl::init(&useDefaultMachineSched), cl::Hidden,
                cl::desc("Machine instruction scheduler to use"));

static MachineSchedRegistry
DefaultSchedRegistry("default", "Use the target's default scheduler choice.",
                     useDefaultMachineSched);

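// For illustration only: an alternative strategy becomes selectable from the
// command line by pairing a factory with a registry entry. The names below
// are hypothetical, but the pattern matches how in-tree strategies hook into
// MachineSchedRegistry:
//
//   static ScheduleDAGInstrs *createMySched(MachineSchedContext *C) {
//     return new ScheduleDAGMILive(C, llvm::make_unique<MyStrategy>(C));
//   }
//   static MachineSchedRegistry
//   MySchedRegistry("my-sched", "Hypothetical custom strategy.",
//                   createMySched);
//
// With that in place, -misched=my-sched overrides the default above.
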
static cl::opt<bool> EnableMachineSched(
    "enable-misched",
    cl::desc("Enable the machine instruction scheduling pass."), cl::init(true),
    cl::Hidden);

static cl::opt<bool> EnablePostRAMachineSched(
    "enable-post-misched",
    cl::desc("Enable the post-ra machine instruction scheduling pass."),
    cl::init(true), cl::Hidden);

/// Forward declare the standard machine scheduler. This will be used as the
/// default scheduler if the target does not set a default.
static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C);
static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C);

/// Decrement this iterator until reaching the top or a non-debug instr.
static MachineBasicBlock::const_iterator
priorNonDebug(MachineBasicBlock::const_iterator I,
              MachineBasicBlock::const_iterator Beg) {
  assert(I != Beg && "reached the top of the region, cannot decrement");
  while (--I != Beg) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
priorNonDebug(MachineBasicBlock::iterator I,
              MachineBasicBlock::const_iterator Beg) {
  return priorNonDebug(MachineBasicBlock::const_iterator(I), Beg)
      .getNonConstIterator();
}

/// If this iterator is a debug value, increment until reaching the End or a
/// non-debug instruction.
static MachineBasicBlock::const_iterator
nextIfDebug(MachineBasicBlock::const_iterator I,
            MachineBasicBlock::const_iterator End) {
  for (; I != End; ++I) {
    if (!I->isDebugValue())
      break;
  }
  return I;
}

/// Non-const version.
static MachineBasicBlock::iterator
nextIfDebug(MachineBasicBlock::iterator I,
            MachineBasicBlock::const_iterator End) {
  return nextIfDebug(MachineBasicBlock::const_iterator(I), End)
      .getNonConstIterator();
}

/// Instantiate a ScheduleDAGInstrs that will be owned by the caller.
ScheduleDAGInstrs *MachineScheduler::createMachineScheduler() {
  // Select the scheduler, or set the default.
  MachineSchedRegistry::ScheduleDAGCtor Ctor = MachineSchedOpt;
  if (Ctor != useDefaultMachineSched)
    return Ctor(this);

  // Get the default scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedLive(this);
}

/// Instantiate a ScheduleDAGInstrs for PostRA scheduling that will be owned by
/// the caller. We don't have a command line option to override the postRA
/// scheduler. The Target must configure it.
ScheduleDAGInstrs *PostMachineScheduler::createPostMachineScheduler() {
  // Get the postRA scheduler set by the target for this function.
  ScheduleDAGInstrs *Scheduler = PassConfig->createPostMachineScheduler(this);
  if (Scheduler)
    return Scheduler;

  // Default to GenericScheduler.
  return createGenericSchedPostRA(this);
}

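// A target can bypass both the command line and the generic default by
// overriding TargetPassConfig::createMachineScheduler. A minimal sketch,
// with a hypothetical target pass-config class:
//
//   ScheduleDAGInstrs *
//   MyPassConfig::createMachineScheduler(MachineSchedContext *C) const {
//     return new ScheduleDAGMILive(C, llvm::make_unique<MyStrategy>(C));
//   }
//
// Returning nullptr from the override falls back to createGenericSchedLive.
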
/// Top-level MachineScheduler pass driver.
///
/// Visit blocks in function order. Divide each block into scheduling regions
/// and visit them bottom-up. Visiting regions bottom-up is not required, but is
/// consistent with the DAG builder, which traverses the interior of the
/// scheduling regions bottom-up.
///
/// This design avoids exposing scheduling boundaries to the DAG builder,
/// simplifying the DAG builder's support for "special" target instructions.
/// At the same time the design allows target schedulers to operate across
/// scheduling boundaries, for example to bundle the boundary instructions
/// without reordering them. This creates complexity, because the target
/// scheduler must update the RegionBegin and RegionEnd positions cached by
/// ScheduleDAGInstrs whenever adding or removing instructions. A much simpler
/// design would be to split blocks at scheduling boundaries, but LLVM has a
/// general bias against block splitting purely for implementation simplicity.
bool MachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnableMachineSched.getNumOccurrences()) {
    if (!EnableMachineSched)
      return false;
  } else if (!mf.getSubtarget().enableMachineScheduler())
    return false;

  DEBUG(dbgs() << "Before MISched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  MLI = &getAnalysis<MachineLoopInfo>();
  MDT = &getAnalysis<MachineDominatorTree>();
  PassConfig = &getAnalysis<TargetPassConfig>();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  LIS = &getAnalysis<LiveIntervals>();

  if (VerifyScheduling) {
    DEBUG(LIS->dump());
    MF->verify(this, "Before machine scheduling.");
  }
  RegClassInfo->runOnMachineFunction(*MF);

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createMachineScheduler());
  scheduleRegions(*Scheduler, false);

  DEBUG(LIS->dump());
  if (VerifyScheduling)
    MF->verify(this, "After machine scheduling.");
  return true;
}

bool PostMachineScheduler::runOnMachineFunction(MachineFunction &mf) {
  if (skipFunction(*mf.getFunction()))
    return false;

  if (EnablePostRAMachineSched.getNumOccurrences()) {
    if (!EnablePostRAMachineSched)
      return false;
  } else if (!mf.getSubtarget().enablePostRAScheduler()) {
    DEBUG(dbgs() << "Subtarget disables post-MI-sched.\n");
    return false;
  }
  DEBUG(dbgs() << "Before post-MI-sched:\n"; mf.print(dbgs()));

  // Initialize the context of the pass.
  MF = &mf;
  PassConfig = &getAnalysis<TargetPassConfig>();

  if (VerifyScheduling)
    MF->verify(this, "Before post machine scheduling.");

  // Instantiate the selected scheduler for this target, function, and
  // optimization level.
  std::unique_ptr<ScheduleDAGInstrs> Scheduler(createPostMachineScheduler());
  scheduleRegions(*Scheduler, true);

  if (VerifyScheduling)
    MF->verify(this, "After post machine scheduling.");
  return true;
}

/// Return true if the given instruction should not be included in a scheduling
/// region.
///
/// MachineScheduler does not currently support scheduling across calls. To
/// handle calls, the DAG builder needs to be modified to create register
/// anti/output dependencies on the registers clobbered by the call's regmask
/// operand. In PreRA scheduling, the stack pointer adjustment already prevents
/// scheduling across calls. In PostRA scheduling, we need the isCall to enforce
/// the boundary, but there would be no benefit to postRA scheduling across
/// calls this late anyway.
static bool isSchedBoundary(MachineBasicBlock::iterator MI,
                            MachineBasicBlock *MBB,
                            MachineFunction *MF,
                            const TargetInstrInfo *TII) {
  return MI->isCall() || TII->isSchedulingBoundary(*MI, MBB, *MF);
}

/// Main driver for both MachineScheduler and PostMachineScheduler.
void MachineSchedulerBase::scheduleRegions(ScheduleDAGInstrs &Scheduler,
                                           bool FixKillFlags) {
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();

  // Visit all machine basic blocks.
  //
  // TODO: Visit blocks in global postorder or postorder within the bottom-up
  // loop tree. Then we can optionally compute global RegPressure.
  for (MachineFunction::iterator MBB = MF->begin(), MBBEnd = MF->end();
       MBB != MBBEnd; ++MBB) {

    Scheduler.startBlock(&*MBB);

#ifndef NDEBUG
    if (SchedOnlyFunc.getNumOccurrences() && SchedOnlyFunc != MF->getName())
      continue;
    if (SchedOnlyBlock.getNumOccurrences()
        && (int)SchedOnlyBlock != MBB->getNumber())
      continue;
#endif

    // Break the block into scheduling regions [I, RegionEnd), and schedule each
    // region as soon as it is discovered. RegionEnd points to the scheduling
    // boundary at the bottom of the region. The DAG does not include RegionEnd,
    // but the region does (i.e. the next RegionEnd is above the previous
    // RegionBegin). If the current block has no terminator then RegionEnd ==
    // MBB->end() for the bottom region.
    //
    // The Scheduler may insert instructions during either schedule() or
    // exitRegion(), even for empty regions. So the local iterators 'I' and
    // 'RegionEnd' are invalid across these calls.
    //
    // MBB::size() uses instr_iterator to count. Here we need a bundle to count
    // as a single instruction.
    for (MachineBasicBlock::iterator RegionEnd = MBB->end();
         RegionEnd != MBB->begin(); RegionEnd = Scheduler.begin()) {

      // Avoid decrementing RegionEnd for blocks with no terminator.
      if (RegionEnd != MBB->end() ||
          isSchedBoundary(&*std::prev(RegionEnd), &*MBB, MF, TII)) {
        --RegionEnd;
      }

      // The next region starts above the previous region. Look backward in the
      // instruction stream until we find the nearest boundary.
      unsigned NumRegionInstrs = 0;
      MachineBasicBlock::iterator I = RegionEnd;
      for (; I != MBB->begin(); --I) {
        MachineInstr &MI = *std::prev(I);
        if (isSchedBoundary(&MI, &*MBB, MF, TII))
          break;
        if (!MI.isDebugValue())
          ++NumRegionInstrs;
      }
      // Notify the scheduler of the region, even if we may skip scheduling
      // it. Perhaps it still needs to be bundled.
      Scheduler.enterRegion(&*MBB, I, RegionEnd, NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (I == RegionEnd || I == std::prev(RegionEnd)) {
        // Close the current region. Bundle the terminator if needed.
        // This invalidates 'RegionEnd' and 'I'.
        Scheduler.exitRegion();
        continue;
      }
      DEBUG(dbgs() << "********** MI Scheduling **********\n");
      DEBUG(dbgs() << MF->getName()
            << ":BB#" << MBB->getNumber() << " " << MBB->getName()
            << "\n  From: " << *I << "    To: ";
            if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
            else dbgs() << "End";
            dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');
      if (DumpCriticalPathLength) {
        errs() << MF->getName();
        errs() << ":BB# " << MBB->getNumber();
        errs() << " " << MBB->getName() << " \n";
      }

      // Schedule a region: possibly reorder instructions.
      // This invalidates 'RegionEnd' and 'I'.
      Scheduler.schedule();

      // Close the current region.
      Scheduler.exitRegion();

      // Scheduling has invalidated the current iterator 'I'. Ask the
      // scheduler for the top of its scheduled region.
      RegionEnd = Scheduler.begin();
    }
    Scheduler.finishBlock();
    // FIXME: Ideally, no further passes should rely on kill flags. However,
    // thumb2 size reduction is currently an exception, so the PostMIScheduler
    // needs to do this.
    if (FixKillFlags)
      Scheduler.fixupKills(&*MBB);
  }
  Scheduler.finalizeSchedule();
}

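// Illustrative example of the region walk above (not from real code): given a
// block of the form
//
//   %a = ...             <- region 2: [%a, call)
//   %b = ...
//   call void @f()       <- boundary (isSchedBoundary)
//   %c = ...             <- region 1: [%c, terminator)
//   %d = ...
//   <terminator>         <- boundary
//
// the call and the terminator bound two regions, which are visited bottom-up:
// region 1 {%c, %d} is scheduled first, then region 2 {%a, %b}.
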
void MachineSchedulerBase::print(raw_ostream &O, const Module* m) const {
  // unimplemented
}

LLVM_DUMP_METHOD
void ReadyQueue::dump() {
  dbgs() << "Queue " << Name << ": ";
  for (unsigned i = 0, e = Queue.size(); i < e; ++i)
    dbgs() << Queue[i]->NodeNum << " ";
  dbgs() << "\n";
}

//===----------------------------------------------------------------------===//
// ScheduleDAGMI - Basic machine instruction scheduling. This is
// independent of PreRA/PostRA scheduling and involves no extra book-keeping for
// virtual registers.
//===----------------------------------------------------------------------===//

// Provide a vtable anchor.
ScheduleDAGMI::~ScheduleDAGMI() {
}

bool ScheduleDAGMI::canAddEdge(SUnit *SuccSU, SUnit *PredSU) {
  return SuccSU == &ExitSU || !Topo.IsReachable(PredSU, SuccSU);
}

bool ScheduleDAGMI::addEdge(SUnit *SuccSU, const SDep &PredDep) {
  if (SuccSU != &ExitSU) {
    // Do not use WillCreateCycle, it assumes SD scheduling.
    // If Pred is reachable from Succ, then the edge creates a cycle.
    if (Topo.IsReachable(PredDep.getSUnit(), SuccSU))
      return false;
    Topo.AddPred(SuccSU, PredDep.getSUnit());
  }
  SuccSU->addPred(PredDep, /*Required=*/!PredDep.isArtificial());
  // Return true regardless of whether a new edge needed to be inserted.
  return true;
}

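// For illustration: a ScheduleDAGMutation that wants to serialize two nodes
// would typically guard the new edge with canAddEdge to avoid creating a
// cycle. A sketch (the SU names are hypothetical):
//
//   if (DAG->canAddEdge(SuccSU, PredSU))
//     DAG->addEdge(SuccSU, SDep(PredSU, SDep::Artificial));
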
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. When
/// NumPredsLeft reaches zero, release the successor node.
///
/// FIXME: Adjust SuccSU height based on MinLatency.
void ScheduleDAGMI::releaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

  if (SuccEdge->isWeak()) {
    --SuccSU->WeakPredsLeft;
    if (SuccEdge->isCluster())
      NextClusterSucc = SuccSU;
    return;
  }
#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->TopReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (SuccSU->TopReadyCycle < SU->TopReadyCycle + SuccEdge->getLatency())
    SuccSU->TopReadyCycle = SU->TopReadyCycle + SuccEdge->getLatency();

  --SuccSU->NumPredsLeft;
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    SchedImpl->releaseTopNode(SuccSU);
}

/// releaseSuccessors - Call releaseSucc on each of SU's successors.
void ScheduleDAGMI::releaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    releaseSucc(SU, &*I);
  }
}

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. When
/// NumSuccsLeft reaches zero, release the predecessor node.
///
/// FIXME: Adjust PredSU height based on MinLatency.
void ScheduleDAGMI::releasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

  if (PredEdge->isWeak()) {
    --PredSU->WeakSuccsLeft;
    if (PredEdge->isCluster())
      NextClusterPred = PredSU;
    return;
  }
#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  // SU->BotReadyCycle was set to CurrCycle when it was scheduled. However,
  // CurrCycle may have advanced since then.
  if (PredSU->BotReadyCycle < SU->BotReadyCycle + PredEdge->getLatency())
    PredSU->BotReadyCycle = SU->BotReadyCycle + PredEdge->getLatency();

  --PredSU->NumSuccsLeft;
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU)
    SchedImpl->releaseBottomNode(PredSU);
}

/// releasePredecessors - Call releasePred on each of SU's predecessors.
void ScheduleDAGMI::releasePredecessors(SUnit *SU) {
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    releasePred(SU, &*I);
  }
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMI::enterRegion(MachineBasicBlock *bb,
                                MachineBasicBlock::iterator begin,
                                MachineBasicBlock::iterator end,
                                unsigned regioninstrs)
{
  ScheduleDAGInstrs::enterRegion(bb, begin, end, regioninstrs);

  SchedImpl->initPolicy(begin, end, regioninstrs);
}

/// This is normally called from the main scheduler loop but may also be invoked
/// by the scheduling strategy to perform additional code motion.
void ScheduleDAGMI::moveInstruction(
  MachineInstr *MI, MachineBasicBlock::iterator InsertPos) {
  // Advance RegionBegin if the first instruction moves down.
  if (&*RegionBegin == MI)
    ++RegionBegin;

  // Update the instruction stream.
  BB->splice(InsertPos, BB, MI);

  // Update LiveIntervals.
  if (LIS)
    LIS->handleMove(*MI, /*UpdateFlags=*/true);

  // Recede RegionBegin if an instruction moves above the first.
  if (RegionBegin == InsertPos)
    RegionBegin = MI;
}

bool ScheduleDAGMI::checkSchedLimit() {
#ifndef NDEBUG
  if (NumInstrsScheduled == MISchedCutoff && MISchedCutoff != ~0U) {
    CurrentTop = CurrentBottom;
    return false;
  }
  ++NumInstrsScheduled;
#endif
  return true;
}
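
// In debug builds checkSchedLimit implements -misched-cutoff (defined above):
// once MISchedCutoff instructions have been scheduled, the unscheduled zone
// is collapsed (CurrentTop = CurrentBottom) and the drivers below stop
// reordering. This can be used to bisect a scheduling problem down to the
// Nth scheduling decision.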

/// Per-region scheduling driver, called back from
/// MachineScheduler::runOnMachineFunction. This is a simplified driver that
/// does not consider liveness or register pressure. It is useful for PostRA
/// scheduling and potentially other custom schedulers.
void ScheduleDAGMI::schedule() {
  DEBUG(dbgs() << "ScheduleDAGMI::schedule starting\n");
  DEBUG(SchedImpl->dumpPolicy());

  // Build the DAG.
  buildSchedGraph(AA);

  Topo.InitDAGTopologicalSorting();

  postprocessDAG();

  SmallVector<SUnit*, 8> TopRoots, BotRoots;
  findRootsAndBiasEdges(TopRoots, BotRoots);

  // Initialize the strategy before modifying the DAG.
  // This may initialize a DFSResult to be used for queue priority.
  SchedImpl->initialize(this);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  if (ViewMISchedDAGs) viewGraph();

  // Initialize ready queues now that the DAG and priority data are finalized.
  initQueues(TopRoots, BotRoots);

  bool IsTopNode = false;
  while (true) {
    DEBUG(dbgs() << "** ScheduleDAGMI::schedule picking next node\n");
    SUnit *SU = SchedImpl->pickNode(IsTopNode);
    if (!SU) break;

    assert(!SU->isScheduled && "Node already scheduled");
    if (!checkSchedLimit())
      break;

    MachineInstr *MI = SU->getInstr();
    if (IsTopNode) {
      assert(SU->isTopReady() && "node still has unscheduled dependencies");
      if (&*CurrentTop == MI)
        CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
      else
        moveInstruction(MI, CurrentTop);
    } else {
      assert(SU->isBottomReady() && "node still has unscheduled dependencies");
      MachineBasicBlock::iterator priorII =
        priorNonDebug(CurrentBottom, CurrentTop);
      if (&*priorII == MI)
        CurrentBottom = priorII;
      else {
        if (&*CurrentTop == MI)
          CurrentTop = nextIfDebug(++CurrentTop, priorII);
        moveInstruction(MI, CurrentBottom);
        CurrentBottom = MI;
      }
    }
    // Notify the scheduling strategy before updating the DAG.
    // This sets the scheduled node's ReadyCycle to CurrCycle. When updateQueues
    // runs, it can then use the accurate ReadyCycle time to determine whether
    // newly released nodes can move to the readyQ.
    SchedImpl->schedNode(SU, IsTopNode);

    updateQueues(SU, IsTopNode);
  }
  assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");

  placeDebugValues();

  DEBUG({
      unsigned BBNum = begin()->getParent()->getNumber();
      dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

/// Apply each ScheduleDAGMutation step in order.
void ScheduleDAGMI::postprocessDAG() {
  for (unsigned i = 0, e = Mutations.size(); i < e; ++i) {
    Mutations[i]->apply(this);
  }
}

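// Mutations are attached before scheduling via ScheduleDAGMI::addMutation,
// typically when the target constructs the scheduler. A sketch, with a
// hypothetical mutation class:
//
//   DAG->addMutation(llvm::make_unique<MyClusterMutation>(TII, TRI));
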
void ScheduleDAGMI::
findRootsAndBiasEdges(SmallVectorImpl<SUnit*> &TopRoots,
                      SmallVectorImpl<SUnit*> &BotRoots) {
  for (std::vector<SUnit>::iterator
         I = SUnits.begin(), E = SUnits.end(); I != E; ++I) {
    SUnit *SU = &(*I);
    assert(!SU->isBoundaryNode() && "Boundary node should not be in SUnits");

    // Order predecessors so DFSResult follows the critical path.
    SU->biasCriticalPath();

    // A SUnit is ready to top schedule if it has no predecessors.
    if (!I->NumPredsLeft)
      TopRoots.push_back(SU);
    // A SUnit is ready to bottom schedule if it has no successors.
    if (!I->NumSuccsLeft)
      BotRoots.push_back(SU);
  }
  ExitSU.biasCriticalPath();
}

/// Identify DAG roots and setup scheduler queues.
void ScheduleDAGMI::initQueues(ArrayRef<SUnit*> TopRoots,
                               ArrayRef<SUnit*> BotRoots) {
  NextClusterSucc = nullptr;
  NextClusterPred = nullptr;

  // Release all DAG roots for scheduling, not including EntrySU/ExitSU.
  //
  // Nodes with unreleased weak edges can still be roots.
  // Release top roots in forward order.
  for (SmallVectorImpl<SUnit*>::const_iterator
         I = TopRoots.begin(), E = TopRoots.end(); I != E; ++I) {
    SchedImpl->releaseTopNode(*I);
  }
  // Release bottom roots in reverse order so the higher priority nodes appear
  // first. This is more natural and slightly more efficient.
  for (SmallVectorImpl<SUnit*>::const_reverse_iterator
         I = BotRoots.rbegin(), E = BotRoots.rend(); I != E; ++I) {
    SchedImpl->releaseBottomNode(*I);
  }

  releaseSuccessors(&EntrySU);
  releasePredecessors(&ExitSU);

  SchedImpl->registerRoots();

  // Advance past initial DebugValues.
  CurrentTop = nextIfDebug(RegionBegin, RegionEnd);
  CurrentBottom = RegionEnd;
}

/// Update scheduler queues after scheduling an instruction.
void ScheduleDAGMI::updateQueues(SUnit *SU, bool IsTopNode) {
  // Release dependent instructions for scheduling.
  if (IsTopNode)
    releaseSuccessors(SU);
  else
    releasePredecessors(SU);

  SU->isScheduled = true;
}

/// Reinsert any remaining debug_values, just like the PostRA scheduler.
void ScheduleDAGMI::placeDebugValues() {
  // If first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue) {
    BB->splice(RegionBegin, BB, FirstDbgValue);
    RegionBegin = FirstDbgValue;
  }

  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *std::prev(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    if (&*RegionBegin == DbgValue)
      ++RegionBegin;
    BB->splice(++OrigPrevMI, BB, DbgValue);
    if (OrigPrevMI == std::prev(RegionEnd))
      RegionEnd = DbgValue;
  }
  DbgValues.clear();
  FirstDbgValue = nullptr;
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ScheduleDAGMI::dumpSchedule() const {
  for (MachineBasicBlock::iterator MI = begin(), ME = end(); MI != ME; ++MI) {
    if (SUnit *SU = getSUnit(&(*MI)))
      SU->dump(this);
    else
      dbgs() << "Missing SUnit\n";
  }
}
#endif

//===----------------------------------------------------------------------===//
// ScheduleDAGMILive - Base class for MachineInstr scheduling with LiveIntervals
// preservation.
//===----------------------------------------------------------------------===//

ScheduleDAGMILive::~ScheduleDAGMILive() {
  delete DFSResult;
}

/// enterRegion - Called back from MachineScheduler::runOnMachineFunction after
/// crossing a scheduling boundary. [begin, end) includes all instructions in
/// the region, including the boundary itself and single-instruction regions
/// that don't get scheduled.
void ScheduleDAGMILive::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned regioninstrs)
{
  // ScheduleDAGMI initializes SchedImpl's per-region policy.
  ScheduleDAGMI::enterRegion(bb, begin, end, regioninstrs);

  // For convenience remember the end of the liveness region.
  LiveRegionEnd = (RegionEnd == bb->end()) ? RegionEnd : std::next(RegionEnd);

  SUPressureDiffs.clear();

  ShouldTrackPressure = SchedImpl->shouldTrackPressure();
  ShouldTrackLaneMasks = SchedImpl->shouldTrackLaneMasks();

  assert((!ShouldTrackLaneMasks || ShouldTrackPressure) &&
         "ShouldTrackLaneMasks requires ShouldTrackPressure");
}

// Set up the register pressure trackers for the top scheduled and bottom
// scheduled regions.
void ScheduleDAGMILive::initRegPressure() {
  TopRPTracker.init(&MF, RegClassInfo, LIS, BB, RegionBegin,
                    ShouldTrackLaneMasks, false);
  BotRPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
                    ShouldTrackLaneMasks, false);

  // Close the RPTracker to finalize live ins.
  RPTracker.closeRegion();

  DEBUG(RPTracker.dump());

  // Initialize the live ins and live outs.
  TopRPTracker.addLiveRegs(RPTracker.getPressure().LiveInRegs);
  BotRPTracker.addLiveRegs(RPTracker.getPressure().LiveOutRegs);

  // Close one end of the tracker so we can call
  // getMaxUpward/DownwardPressureDelta before advancing across any
  // instructions. This converts currently live regs into live ins/outs.
  TopRPTracker.closeTop();
  BotRPTracker.closeBottom();

  BotRPTracker.initLiveThru(RPTracker);
  if (!BotRPTracker.getLiveThru().empty()) {
    TopRPTracker.initLiveThru(BotRPTracker.getLiveThru());
    DEBUG(dbgs() << "Live Thru: ";
          dumpRegSetPressure(BotRPTracker.getLiveThru(), TRI));
  }

  // For each live out vreg reduce the pressure change associated with other
  // uses of the same vreg below the live-out reaching def.
  updatePressureDiffs(RPTracker.getPressure().LiveOutRegs);

  // Account for liveness generated by the region boundary.
  if (LiveRegionEnd != RegionEnd) {
    SmallVector<RegisterMaskPair, 8> LiveUses;
    BotRPTracker.recede(&LiveUses);
    updatePressureDiffs(LiveUses);
  }

  DEBUG(
    dbgs() << "Top Pressure:\n";
    dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
    dbgs() << "Bottom Pressure:\n";
    dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
  );

  assert(BotRPTracker.getPos() == RegionEnd && "Can't find the region bottom");

  // Cache the list of excess pressure sets in this region. This will also track
  // the max pressure in the scheduled code for these sets.
  RegionCriticalPSets.clear();
  const std::vector<unsigned> &RegionPressure =
    RPTracker.getPressure().MaxSetPressure;
  for (unsigned i = 0, e = RegionPressure.size(); i < e; ++i) {
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(i);
    if (RegionPressure[i] > Limit) {
      DEBUG(dbgs() << TRI->getRegPressureSetName(i)
            << " Limit " << Limit
            << " Actual " << RegionPressure[i] << "\n");
      RegionCriticalPSets.push_back(PressureChange(i));
    }
  }
  DEBUG(dbgs() << "Excess PSets: ";
        for (unsigned i = 0, e = RegionCriticalPSets.size(); i != e; ++i)
          dbgs() << TRI->getRegPressureSetName(
            RegionCriticalPSets[i].getPSet()) << " ";
        dbgs() << "\n");
}

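// Worked example for the loop above (illustrative numbers): if pressure set
// i has a limit of 32 registers and RPTracker recorded a max pressure of 34
// anywhere in the region, PressureChange(i) lands in RegionCriticalPSets.
// The strategy can then bias its picks against orderings that further raise
// pressure in set i, since the region already exceeds that set's limit.
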
void ScheduleDAGMILive::
updateScheduledPressure(const SUnit *SU,
                        const std::vector<unsigned> &NewMaxPressure) {
  const PressureDiff &PDiff = getPressureDiff(SU);
  unsigned CritIdx = 0, CritEnd = RegionCriticalPSets.size();
  for (PressureDiff::const_iterator I = PDiff.begin(), E = PDiff.end();
       I != E; ++I) {
    if (!I->isValid())
      break;
    unsigned ID = I->getPSet();
    while (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() < ID)
      ++CritIdx;
    if (CritIdx != CritEnd && RegionCriticalPSets[CritIdx].getPSet() == ID) {
      if ((int)NewMaxPressure[ID] > RegionCriticalPSets[CritIdx].getUnitInc()
          && NewMaxPressure[ID] <= INT16_MAX)
        RegionCriticalPSets[CritIdx].setUnitInc(NewMaxPressure[ID]);
    }
    unsigned Limit = RegClassInfo->getRegPressureSetLimit(ID);
    if (NewMaxPressure[ID] >= Limit - 2) {
      DEBUG(dbgs() << "  " << TRI->getRegPressureSetName(ID) << ": "
            << NewMaxPressure[ID]
            << ((NewMaxPressure[ID] > Limit) ? " > " : " <= ") << Limit
            << "(+ " << BotRPTracker.getLiveThru()[ID] << " livethru)\n");
    }
  }
}

/// Update the PressureDiff array for liveness after scheduling this
/// instruction.
void ScheduleDAGMILive::updatePressureDiffs(
    ArrayRef<RegisterMaskPair> LiveUses) {
  for (const RegisterMaskPair &P : LiveUses) {
    unsigned Reg = P.RegUnit;
    /// FIXME: Currently assuming single-use physregs.
    if (!TRI->isVirtualRegister(Reg))
      continue;

    if (ShouldTrackLaneMasks) {
      // If the register has just become live then other uses won't change
      // this fact anymore => decrement pressure.
      // If the register has just become dead then other uses make it come
      // back to life => increment pressure.
      bool Decrement = P.LaneMask != 0;

      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit &SU = *V2SU.SU;
        if (SU.isScheduled || &SU == &ExitSU)
          continue;

        PressureDiff &PDiff = getPressureDiff(&SU);
        PDiff.addPressureChange(Reg, Decrement, &MRI);
        DEBUG(
          dbgs() << "  UpdateRegP: SU(" << SU.NodeNum << ") "
                 << PrintReg(Reg, TRI) << ':' << PrintLaneMask(P.LaneMask)
                 << ' ' << *SU.getInstr();
          dbgs() << "              to ";
          PDiff.dump(*TRI);
        );
      }
    } else {
      assert(P.LaneMask != 0);
      DEBUG(dbgs() << "  LiveReg: " << PrintVRegOrUnit(Reg, TRI) << "\n");
      // This may be called before CurrentBottom has been initialized. However,
      // BotRPTracker must have a valid position. We want the value live into
      // the instruction or live out of the block, so ask for the previous
      // instruction's live-out.
      const LiveInterval &LI = LIS->getInterval(Reg);
      VNInfo *VNI;
      MachineBasicBlock::const_iterator I =
        nextIfDebug(BotRPTracker.getPos(), BB->end());
      if (I == BB->end())
        VNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
      else {
        LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*I));
        VNI = LRQ.valueIn();
      }
      // RegisterPressureTracker guarantees that readsReg is true for LiveUses.
      assert(VNI && "No live value at use.");
      for (const VReg2SUnit &V2SU
           : make_range(VRegUses.find(Reg), VRegUses.end())) {
        SUnit *SU = V2SU.SU;
        // If this use comes before the reaching def, it cannot be a last use,
        // so decrease its pressure change.
        if (!SU->isScheduled && SU != &ExitSU) {
          LiveQueryResult LRQ =
            LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
          if (LRQ.valueIn() == VNI) {
            PressureDiff &PDiff = getPressureDiff(SU);
            PDiff.addPressureChange(Reg, true, &MRI);
            DEBUG(
              dbgs() << "  UpdateRegP: SU(" << SU->NodeNum << ") "
                     << *SU->getInstr();
              dbgs() << "              to ";
              PDiff.dump(*TRI);
            );
          }
        }
      }
    }
  }
}

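// Illustrative example for the else-branch above: if vreg %v is live out of
// the region and SU(3) reads the same value of %v that reaches the region
// bottom, then scheduling SU(3) cannot end %v's live range, so the pressure
// decrement previously credited to SU(3) as a potential last use is taken
// back via addPressureChange.
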
Andrew Trick8823dec2012-03-14 04:00:41 +00001060/// schedule - Called back from MachineScheduler::runOnMachineFunction
Andrew Trick88639922012-04-24 17:56:43 +00001061/// after setting up the current scheduling region. [RegionBegin, RegionEnd)
1062/// only includes instructions that have DAG nodes, not scheduling boundaries.
Andrew Trick7a8e1002012-09-11 00:39:15 +00001063///
1064/// This is a skeletal driver, with all the functionality pushed into helpers,
Nick Lewycky06b0ea22015-08-18 22:41:58 +00001065/// so that it can be easily extended by experimental schedulers. Generally,
Andrew Trick7a8e1002012-09-11 00:39:15 +00001066/// implementing MachineSchedStrategy should be sufficient to implement a new
1067/// scheduling algorithm. However, if a scheduler further subclasses
Andrew Trickd7f890e2013-12-28 21:56:47 +00001068/// ScheduleDAGMILive then it will want to override this virtual method in order
1069/// to update any specialized state.
1070void ScheduleDAGMILive::schedule() {
James Y Knighte72b0db2015-09-18 18:52:20 +00001071 DEBUG(dbgs() << "ScheduleDAGMILive::schedule starting\n");
1072 DEBUG(SchedImpl->dumpPolicy());
Andrew Trick7a8e1002012-09-11 00:39:15 +00001073 buildDAGWithRegPressure();
1074
Andrew Tricka7714a02012-11-12 19:40:10 +00001075 Topo.InitDAGTopologicalSorting();
1076
Andrew Tricka2733e92012-09-14 17:22:42 +00001077 postprocessDAG();
1078
Andrew Tricke2c3f5c2013-01-25 06:33:57 +00001079 SmallVector<SUnit*, 8> TopRoots, BotRoots;
1080 findRootsAndBiasEdges(TopRoots, BotRoots);
1081
1082 // Initialize the strategy before modifying the DAG.
1083 // This may initialize a DFSResult to be used for queue priority.
1084 SchedImpl->initialize(this);
1085
Matthias Braun9198c672015-11-06 20:59:02 +00001086 DEBUG(
1087 for (const SUnit &SU : SUnits) {
1088 SU.dumpAll(this);
1089 if (ShouldTrackPressure) {
1090 dbgs() << " Pressure Diff : ";
1091 getPressureDiff(&SU).dump(*TRI);
1092 }
1093 dbgs() << '\n';
1094 }
1095 );
Andrew Tricke2c3f5c2013-01-25 06:33:57 +00001096 if (ViewMISchedDAGs) viewGraph();
Andrew Trick7a8e1002012-09-11 00:39:15 +00001097
Andrew Tricke2c3f5c2013-01-25 06:33:57 +00001098 // Initialize ready queues now that the DAG and priority data are finalized.
1099 initQueues(TopRoots, BotRoots);
Andrew Trick7a8e1002012-09-11 00:39:15 +00001100
1101 bool IsTopNode = false;
James Y Knighte72b0db2015-09-18 18:52:20 +00001102 while (true) {
1103 DEBUG(dbgs() << "** ScheduleDAGMILive::schedule picking next node\n");
1104 SUnit *SU = SchedImpl->pickNode(IsTopNode);
1105 if (!SU) break;
1106
Andrew Trick984d98b2012-10-08 18:53:53 +00001107 assert(!SU->isScheduled && "Node already scheduled");
Andrew Trick7a8e1002012-09-11 00:39:15 +00001108 if (!checkSchedLimit())
1109 break;
1110
1111 scheduleMI(SU, IsTopNode);
1112
Andrew Trickd7f890e2013-12-28 21:56:47 +00001113 if (DFSResult) {
1114 unsigned SubtreeID = DFSResult->getSubtreeID(SU);
1115 if (!ScheduledTrees.test(SubtreeID)) {
1116 ScheduledTrees.set(SubtreeID);
1117 DFSResult->scheduleTree(SubtreeID);
1118 SchedImpl->scheduleTree(SubtreeID);
1119 }
1120 }
1121
1122 // Notify the scheduling strategy after updating the DAG.
1123 SchedImpl->schedNode(SU, IsTopNode);
Andrew Trick43adfb32015-03-27 06:10:13 +00001124
1125 updateQueues(SU, IsTopNode);
Andrew Trick7a8e1002012-09-11 00:39:15 +00001126 }
1127 assert(CurrentTop == CurrentBottom && "Nonempty unscheduled zone.");
1128
1129 placeDebugValues();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001130
1131 DEBUG({
Andrew Trickcf7e6972012-11-28 03:42:47 +00001132 unsigned BBNum = begin()->getParent()->getNumber();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001133 dbgs() << "*** Final schedule for BB#" << BBNum << " ***\n";
1134 dumpSchedule();
1135 dbgs() << '\n';
1136 });
Andrew Trick7a8e1002012-09-11 00:39:15 +00001137}
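// Illustrative sketch (not part of this file): the comment above notes that
// implementing MachineSchedStrategy is normally enough to plug in a new
// algorithm. A minimal top-down strategy could look roughly like the
// following; the class name and plain-vector ready list are assumptions, and
// exact virtual signatures may differ between LLVM revisions:
//
//   struct TrivialSchedStrategy : public MachineSchedStrategy {
//     std::vector<SUnit*> ReadyQ; // hypothetical ready list
//     void initialize(ScheduleDAGMI *DAG) override { ReadyQ.clear(); }
//     SUnit *pickNode(bool &IsTopNode) override {
//       IsTopNode = true; // always extend the top zone
//       if (ReadyQ.empty()) return nullptr;
//       SUnit *SU = ReadyQ.back();
//       ReadyQ.pop_back();
//       return SU;
//     }
//     void schedNode(SUnit *SU, bool IsTopNode) override {}
//     void releaseTopNode(SUnit *SU) override { ReadyQ.push_back(SU); }
//     void releaseBottomNode(SUnit *SU) override {}
//   };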
1138
1139/// Build the DAG and setup three register pressure trackers.
Andrew Trickd7f890e2013-12-28 21:56:47 +00001140void ScheduleDAGMILive::buildDAGWithRegPressure() {
Andrew Trickb6e74712013-09-04 20:59:59 +00001141 if (!ShouldTrackPressure) {
1142 RPTracker.reset();
1143 RegionCriticalPSets.clear();
1144 buildSchedGraph(AA);
1145 return;
1146 }
1147
Andrew Trick4add42f2012-05-10 21:06:10 +00001148 // Initialize the register pressure tracker used by buildSchedGraph.
Andrew Trick9c17eab2013-07-30 19:59:12 +00001149 RPTracker.init(&MF, RegClassInfo, LIS, BB, LiveRegionEnd,
Matthias Braund4f64092016-01-20 00:23:32 +00001150 ShouldTrackLaneMasks, /*TrackUntiedDefs=*/true);
Andrew Trick88639922012-04-24 17:56:43 +00001151
Andrew Trick4add42f2012-05-10 21:06:10 +00001152 // Account for liveness generated by the region boundary.
1153 if (LiveRegionEnd != RegionEnd)
1154 RPTracker.recede();
1155
1156 // Build the DAG, and compute current register pressure.
Matthias Braund4f64092016-01-20 00:23:32 +00001157 buildSchedGraph(AA, &RPTracker, &SUPressureDiffs, LIS, ShouldTrackLaneMasks);
Andrew Trick02a80da2012-03-08 01:41:12 +00001158
Andrew Trick4add42f2012-05-10 21:06:10 +00001159 // Initialize top/bottom trackers after computing region pressure.
1160 initRegPressure();
Andrew Trick7a8e1002012-09-11 00:39:15 +00001161}
Andrew Trick4add42f2012-05-10 21:06:10 +00001162
Andrew Trickd7f890e2013-12-28 21:56:47 +00001163void ScheduleDAGMILive::computeDFSResult() {
Andrew Trick44f750a2013-01-25 04:01:04 +00001164 if (!DFSResult)
1165 DFSResult = new SchedDFSResult(/*BottomU*/true, MinSubtreeSize);
1166 DFSResult->clear();
Andrew Trick44f750a2013-01-25 04:01:04 +00001167 ScheduledTrees.clear();
Andrew Tricke2c3f5c2013-01-25 06:33:57 +00001168 DFSResult->resize(SUnits.size());
1169 DFSResult->compute(SUnits);
Andrew Trick44f750a2013-01-25 04:01:04 +00001170 ScheduledTrees.resize(DFSResult->getNumSubtrees());
1171}
1172
Andrew Trick483f4192013-08-29 18:04:49 +00001173/// Compute the max cyclic critical path through the DAG. The scheduling DAG
1174/// only provides the critical path for single block loops. To handle loops that
1175/// span blocks, we could use the vreg path latencies provided by
1176/// MachineTraceMetrics instead. However, MachineTraceMetrics is not currently
1177/// available for use in the scheduler.
1178///
1179/// The cyclic path estimation identifies a def-use pair that crosses the back
Andrew Trickef80f502013-08-30 02:02:12 +00001180/// edge and considers the depth and height of the nodes. For example, consider
Andrew Trick483f4192013-08-29 18:04:49 +00001181/// the following instruction sequence where each instruction has unit latency
1182/// and defines an eponymous virtual register:
1183///
1184/// a->b(a,c)->c(b)->d(c)->exit
1185///
1186/// The cyclic critical path is two cycles: b->c->b
1187/// The acyclic critical path is four cycles: a->b->c->d->exit
1188/// LiveOutHeight = height(c) = len(c->d->exit) = 2
1189/// LiveOutDepth = depth(c) + 1 = len(a->b->c) + 1 = 3
1190/// LiveInHeight = height(b) + 1 = len(b->c->d->exit) + 1 = 4
1191/// LiveInDepth = depth(b) = len(a->b) = 1
1192///
1193/// LiveOutDepth - LiveInDepth = 3 - 1 = 2
1194/// LiveInHeight - LiveOutHeight = 4 - 2 = 2
1195/// CyclicCriticalPath = min(2, 2) = 2
Andrew Trickd7f890e2013-12-28 21:56:47 +00001196///
1197/// This could be relevant to PostRA scheduling, but is currently implemented
1198/// assuming LiveIntervals.
1199unsigned ScheduleDAGMILive::computeCyclicCriticalPath() {
Andrew Trick483f4192013-08-29 18:04:49 +00001200 // This only applies to a single-block loop.
1201 if (!BB->isSuccessor(BB))
1202 return 0;
1203
1204 unsigned MaxCyclicLatency = 0;
1205 // Visit each live out vreg def to find def/use pairs that cross iterations.
Matthias Braun5d458612016-01-20 00:23:26 +00001206 for (const RegisterMaskPair &P : RPTracker.getPressure().LiveOutRegs) {
1207 unsigned Reg = P.RegUnit;
Andrew Trick483f4192013-08-29 18:04:49 +00001208 if (!TRI->isVirtualRegister(Reg))
1209 continue;
1210 const LiveInterval &LI = LIS->getInterval(Reg);
1211 const VNInfo *DefVNI = LI.getVNInfoBefore(LIS->getMBBEndIdx(BB));
1212 if (!DefVNI)
1213 continue;
1214
1215 MachineInstr *DefMI = LIS->getInstructionFromIndex(DefVNI->def);
1216 const SUnit *DefSU = getSUnit(DefMI);
1217 if (!DefSU)
1218 continue;
1219
1220 unsigned LiveOutHeight = DefSU->getHeight();
1221 unsigned LiveOutDepth = DefSU->getDepth() + DefSU->Latency;
1222 // Visit all local users of the vreg def.
Matthias Braunb0c437b2015-10-29 03:57:17 +00001223 for (const VReg2SUnit &V2SU
1224 : make_range(VRegUses.find(Reg), VRegUses.end())) {
1225 SUnit *SU = V2SU.SU;
1226 if (SU == &ExitSU)
Andrew Trick483f4192013-08-29 18:04:49 +00001227 continue;
1228
1229 // Only consider uses of the phi.
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001230 LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(*SU->getInstr()));
Andrew Trick483f4192013-08-29 18:04:49 +00001231 if (!LRQ.valueIn()->isPHIDef())
1232 continue;
1233
1234 // Assume that a path spanning two iterations is a cycle, which could
1235 // overestimate in strange cases. This allows cyclic latency to be
1236 // estimated as the minimum slack of the vreg's depth or height.
1237 unsigned CyclicLatency = 0;
Matthias Braunb0c437b2015-10-29 03:57:17 +00001238 if (LiveOutDepth > SU->getDepth())
1239 CyclicLatency = LiveOutDepth - SU->getDepth();
Andrew Trick483f4192013-08-29 18:04:49 +00001240
Matthias Braunb0c437b2015-10-29 03:57:17 +00001241 unsigned LiveInHeight = SU->getHeight() + DefSU->Latency;
Andrew Trick483f4192013-08-29 18:04:49 +00001242 if (LiveInHeight > LiveOutHeight) {
1243 if (LiveInHeight - LiveOutHeight < CyclicLatency)
1244 CyclicLatency = LiveInHeight - LiveOutHeight;
Matthias Braunb550b762016-04-21 01:54:13 +00001245 } else
Andrew Trick483f4192013-08-29 18:04:49 +00001246 CyclicLatency = 0;
1247
1248 DEBUG(dbgs() << "Cyclic Path: SU(" << DefSU->NodeNum << ") -> SU("
Matthias Braunb0c437b2015-10-29 03:57:17 +00001249 << SU->NodeNum << ") = " << CyclicLatency << "c\n");
Andrew Trick483f4192013-08-29 18:04:49 +00001250 if (CyclicLatency > MaxCyclicLatency)
1251 MaxCyclicLatency = CyclicLatency;
1252 }
1253 }
1254 DEBUG(dbgs() << "Cyclic Critical Path: " << MaxCyclicLatency << "c\n");
1255 return MaxCyclicLatency;
1256}
1257
Krzysztof Parzyszek7ea9a522016-04-28 19:17:44 +00001258/// Release ExitSU predecessors and set up the scheduler queues. Re-position
1259/// the Top RP tracker in case the region beginning has changed.
1260void ScheduleDAGMILive::initQueues(ArrayRef<SUnit*> TopRoots,
1261 ArrayRef<SUnit*> BotRoots) {
1262 ScheduleDAGMI::initQueues(TopRoots, BotRoots);
1263 if (ShouldTrackPressure) {
1264 assert(TopRPTracker.getPos() == RegionBegin && "bad initial Top tracker");
1265 TopRPTracker.setPos(CurrentTop);
1266 }
1267}
1268
Andrew Trick7a8e1002012-09-11 00:39:15 +00001269/// Move an instruction and update register pressure.
Andrew Trickd7f890e2013-12-28 21:56:47 +00001270void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
Andrew Trick7a8e1002012-09-11 00:39:15 +00001271 // Move the instruction to its new location in the instruction stream.
1272 MachineInstr *MI = SU->getInstr();
Andrew Trick02a80da2012-03-08 01:41:12 +00001273
Andrew Trick7a8e1002012-09-11 00:39:15 +00001274 if (IsTopNode) {
1275 assert(SU->isTopReady() && "node still has unscheduled dependencies");
1276 if (&*CurrentTop == MI)
1277 CurrentTop = nextIfDebug(++CurrentTop, CurrentBottom);
Andrew Trick8823dec2012-03-14 04:00:41 +00001278 else {
Andrew Trick7a8e1002012-09-11 00:39:15 +00001279 moveInstruction(MI, CurrentTop);
1280 TopRPTracker.setPos(MI);
Andrew Trick8823dec2012-03-14 04:00:41 +00001281 }
Andrew Trickc3ea0052012-04-24 18:04:37 +00001282
Andrew Trickb6e74712013-09-04 20:59:59 +00001283 if (ShouldTrackPressure) {
1284 // Update top scheduled pressure.
Matthias Braund4f64092016-01-20 00:23:32 +00001285 RegisterOperands RegOpers;
1286 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1287 if (ShouldTrackLaneMasks) {
1288 // Adjust liveness and add missing dead+read-undef flags.
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001289 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
Matthias Braund4f64092016-01-20 00:23:32 +00001290 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1291 } else {
1292 // Adjust for missing dead-def flags.
1293 RegOpers.detectDeadDefs(*MI, *LIS);
1294 }
1295
1296 TopRPTracker.advance(RegOpers);
Andrew Trickb6e74712013-09-04 20:59:59 +00001297 assert(TopRPTracker.getPos() == CurrentTop && "out of sync");
Matthias Braun9198c672015-11-06 20:59:02 +00001298 DEBUG(
1299 dbgs() << "Top Pressure:\n";
1300 dumpRegSetPressure(TopRPTracker.getRegSetPressureAtPos(), TRI);
1301 );
1302
Andrew Trickb248b4a2013-09-06 17:32:47 +00001303 updateScheduledPressure(SU, TopRPTracker.getPressure().MaxSetPressure);
Andrew Trickb6e74712013-09-04 20:59:59 +00001304 }
Matthias Braunb550b762016-04-21 01:54:13 +00001305 } else {
Andrew Trick7a8e1002012-09-11 00:39:15 +00001306 assert(SU->isBottomReady() && "node still has unscheduled dependencies");
1307 MachineBasicBlock::iterator priorII =
1308 priorNonDebug(CurrentBottom, CurrentTop);
1309 if (&*priorII == MI)
1310 CurrentBottom = priorII;
1311 else {
1312 if (&*CurrentTop == MI) {
1313 CurrentTop = nextIfDebug(++CurrentTop, priorII);
1314 TopRPTracker.setPos(CurrentTop);
1315 }
1316 moveInstruction(MI, CurrentBottom);
1317 CurrentBottom = MI;
1318 }
Andrew Trickb6e74712013-09-04 20:59:59 +00001319 if (ShouldTrackPressure) {
Matthias Braund4f64092016-01-20 00:23:32 +00001320 RegisterOperands RegOpers;
1321 RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
1322 if (ShouldTrackLaneMasks) {
1323 // Adjust liveness and add missing dead+read-undef flags.
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001324 SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
Matthias Braund4f64092016-01-20 00:23:32 +00001325 RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
1326 } else {
1327 // Adjust for missing dead-def flags.
1328 RegOpers.detectDeadDefs(*MI, *LIS);
1329 }
1330
1331 BotRPTracker.recedeSkipDebugValues();
Matthias Braun5d458612016-01-20 00:23:26 +00001332 SmallVector<RegisterMaskPair, 8> LiveUses;
Matthias Braund4f64092016-01-20 00:23:32 +00001333 BotRPTracker.recede(RegOpers, &LiveUses);
Andrew Trickb6e74712013-09-04 20:59:59 +00001334 assert(BotRPTracker.getPos() == CurrentBottom && "out of sync");
Matthias Braun9198c672015-11-06 20:59:02 +00001335 DEBUG(
1336 dbgs() << "Bottom Pressure:\n";
1337 dumpRegSetPressure(BotRPTracker.getRegSetPressureAtPos(), TRI);
1338 );
1339
Andrew Trickb248b4a2013-09-06 17:32:47 +00001340 updateScheduledPressure(SU, BotRPTracker.getPressure().MaxSetPressure);
Andrew Trickb6e74712013-09-04 20:59:59 +00001341 updatePressureDiffs(LiveUses);
Andrew Trickb6e74712013-09-04 20:59:59 +00001342 }
Andrew Trick7a8e1002012-09-11 00:39:15 +00001343 }
1344}
1345
Andrew Trick263280242012-11-12 19:52:20 +00001346//===----------------------------------------------------------------------===//
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001347// BaseMemOpClusterMutation - DAG post-processing to cluster loads or stores.
Andrew Trick263280242012-11-12 19:52:20 +00001348//===----------------------------------------------------------------------===//
1349
Andrew Tricka7714a02012-11-12 19:40:10 +00001350namespace {
1351/// \brief Post-process the DAG to create cluster edges between neighboring
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001352/// loads or between neighboring stores.
1353class BaseMemOpClusterMutation : public ScheduleDAGMutation {
1354 struct MemOpInfo {
Andrew Tricka7714a02012-11-12 19:40:10 +00001355 SUnit *SU;
1356 unsigned BaseReg;
Chad Rosierc27a18f2016-03-09 16:00:35 +00001357 int64_t Offset;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001358 MemOpInfo(SUnit *su, unsigned reg, int64_t ofs)
1359 : SU(su), BaseReg(reg), Offset(ofs) {}
Benjamin Kramerb0f74b22014-03-07 21:35:39 +00001360
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001361 bool operator<(const MemOpInfo&RHS) const {
Benjamin Kramerb0f74b22014-03-07 21:35:39 +00001362 return std::tie(BaseReg, Offset) < std::tie(RHS.BaseReg, RHS.Offset);
1363 }
Andrew Tricka7714a02012-11-12 19:40:10 +00001364 };
Andrew Tricka7714a02012-11-12 19:40:10 +00001365
1366 const TargetInstrInfo *TII;
1367 const TargetRegisterInfo *TRI;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001368 bool IsLoad;
1369
Andrew Tricka7714a02012-11-12 19:40:10 +00001370public:
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001371 BaseMemOpClusterMutation(const TargetInstrInfo *tii,
1372 const TargetRegisterInfo *tri, bool IsLoad)
1373 : TII(tii), TRI(tri), IsLoad(IsLoad) {}
Andrew Tricka7714a02012-11-12 19:40:10 +00001374
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001375 void apply(ScheduleDAGInstrs *DAGInstrs) override;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001376
Andrew Tricka7714a02012-11-12 19:40:10 +00001377protected:
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001378 void clusterNeighboringMemOps(ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG);
1379};
1380
1381class StoreClusterMutation : public BaseMemOpClusterMutation {
1382public:
1383 StoreClusterMutation(const TargetInstrInfo *tii,
1384 const TargetRegisterInfo *tri)
1385 : BaseMemOpClusterMutation(tii, tri, false) {}
1386};
1387
1388class LoadClusterMutation : public BaseMemOpClusterMutation {
1389public:
1390 LoadClusterMutation(const TargetInstrInfo *tii, const TargetRegisterInfo *tri)
1391 : BaseMemOpClusterMutation(tii, tri, true) {}
Andrew Tricka7714a02012-11-12 19:40:10 +00001392};
Alexander Kornienkof00654e2015-06-23 09:49:53 +00001393} // anonymous
Andrew Tricka7714a02012-11-12 19:40:10 +00001394
Tom Stellard68726a52016-08-19 19:59:18 +00001395namespace llvm {
1396
1397std::unique_ptr<ScheduleDAGMutation>
1398createLoadClusterDAGMutation(const TargetInstrInfo *TII,
1399 const TargetRegisterInfo *TRI) {
1400 return make_unique<LoadClusterMutation>(TII, TRI);
1401}
1402
1403std::unique_ptr<ScheduleDAGMutation>
1404createStoreClusterDAGMutation(const TargetInstrInfo *TII,
1405 const TargetRegisterInfo *TRI) {
1406 return make_unique<StoreClusterMutation>(TII, TRI);
1407}
1408
1409} // namespace llvm
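// Typical hookup (a sketch mirroring in-tree targets such as AArch64, and
// assuming the createGenericSchedLive helper): a target's
// createMachineScheduler override attaches these mutations while building
// the DAG:
//
//   ScheduleDAGMILive *DAG = createGenericSchedLive(C);
//   DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
//   DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
//   return DAG;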
1410
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001411void BaseMemOpClusterMutation::clusterNeighboringMemOps(
1412 ArrayRef<SUnit *> MemOps, ScheduleDAGMI *DAG) {
1413 SmallVector<MemOpInfo, 32> MemOpRecords;
1414 for (unsigned Idx = 0, End = MemOps.size(); Idx != End; ++Idx) {
1415 SUnit *SU = MemOps[Idx];
Andrew Tricka7714a02012-11-12 19:40:10 +00001416 unsigned BaseReg;
Chad Rosierc27a18f2016-03-09 16:00:35 +00001417 int64_t Offset;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001418 if (TII->getMemOpBaseRegImmOfs(*SU->getInstr(), BaseReg, Offset, TRI))
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001419 MemOpRecords.push_back(MemOpInfo(SU, BaseReg, Offset));
Andrew Tricka7714a02012-11-12 19:40:10 +00001420 }
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001421 if (MemOpRecords.size() < 2)
Andrew Tricka7714a02012-11-12 19:40:10 +00001422 return;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001423
1424 std::sort(MemOpRecords.begin(), MemOpRecords.end());
Andrew Tricka7714a02012-11-12 19:40:10 +00001425 unsigned ClusterLength = 1;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001426 for (unsigned Idx = 0, End = MemOpRecords.size(); Idx < (End - 1); ++Idx) {
1427 if (MemOpRecords[Idx].BaseReg != MemOpRecords[Idx+1].BaseReg) {
Andrew Tricka7714a02012-11-12 19:40:10 +00001428 ClusterLength = 1;
1429 continue;
1430 }
1431
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001432 SUnit *SUa = MemOpRecords[Idx].SU;
1433 SUnit *SUb = MemOpRecords[Idx+1].SU;
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001434 if (TII->shouldClusterMemOps(*SUa->getInstr(), *SUb->getInstr(),
1435 ClusterLength) &&
1436 DAG->addEdge(SUb, SDep(SUa, SDep::Cluster))) {
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001437 DEBUG(dbgs() << "Cluster ld/st SU(" << SUa->NodeNum << ") - SU("
Andrew Tricka7714a02012-11-12 19:40:10 +00001438 << SUb->NodeNum << ")\n");
1439 // Copy successor edges from SUa to SUb. Interleaving computation
1440 // dependent on SUa can prevent load combining due to register reuse.
1441 // Predecessor edges do not need to be copied from SUb to SUa since nearby
1442 // loads should have effectively the same inputs.
1443 for (SUnit::const_succ_iterator
1444 SI = SUa->Succs.begin(), SE = SUa->Succs.end(); SI != SE; ++SI) {
1445 if (SI->getSUnit() == SUb)
1446 continue;
1447 DEBUG(dbgs() << " Copy Succ SU(" << SI->getSUnit()->NodeNum << ")\n");
1448 DAG->addEdge(SI->getSUnit(), SDep(SUb, SDep::Artificial));
1449 }
1450 ++ClusterLength;
Matthias Braunb550b762016-04-21 01:54:13 +00001451 } else
Andrew Tricka7714a02012-11-12 19:40:10 +00001452 ClusterLength = 1;
1453 }
1454}
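// Worked example (hypothetical SUnits): loads from [x1, #0], [x1, #8], and
// [x2, #0] sort into runs keyed by base register. If the target's
// shouldClusterMemOps accepts the (#0, #8) pair on x1, the weak Cluster edge
// from the #0 load to the #8 load encourages the scheduler to keep them
// adjacent, while the [x2, #0] load starts a new run of length one.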
1455
1456/// \brief Callback from DAG postProcessing to create cluster edges for loads or stores.
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001457void BaseMemOpClusterMutation::apply(ScheduleDAGInstrs *DAGInstrs) {
1458
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001459 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1460
Andrew Tricka7714a02012-11-12 19:40:10 +00001461 // Map DAG NodeNum to store chain ID.
1462 DenseMap<unsigned, unsigned> StoreChainIDs;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001463 // Map each store chain to a set of dependent MemOps.
Andrew Tricka7714a02012-11-12 19:40:10 +00001464 SmallVector<SmallVector<SUnit*,4>, 32> StoreChainDependents;
1465 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1466 SUnit *SU = &DAG->SUnits[Idx];
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001467 if ((IsLoad && !SU->getInstr()->mayLoad()) ||
1468 (!IsLoad && !SU->getInstr()->mayStore()))
Andrew Tricka7714a02012-11-12 19:40:10 +00001469 continue;
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001470
Andrew Tricka7714a02012-11-12 19:40:10 +00001471 unsigned ChainPredID = DAG->SUnits.size();
1472 for (SUnit::const_pred_iterator
1473 PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
1474 if (PI->isCtrl()) {
1475 ChainPredID = PI->getSUnit()->NodeNum;
1476 break;
1477 }
1478 }
1479 // Check if this chain-like pred has been seen
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001480 // before. ChainPredID==MaxNodeID at the top of the schedule.
Andrew Tricka7714a02012-11-12 19:40:10 +00001481 unsigned NumChains = StoreChainDependents.size();
1482 std::pair<DenseMap<unsigned, unsigned>::iterator, bool> Result =
1483 StoreChainIDs.insert(std::make_pair(ChainPredID, NumChains));
1484 if (Result.second)
1485 StoreChainDependents.resize(NumChains + 1);
1486 StoreChainDependents[Result.first->second].push_back(SU);
1487 }
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001488
Andrew Tricka7714a02012-11-12 19:40:10 +00001489 // Iterate over the store chains.
1490 for (unsigned Idx = 0, End = StoreChainDependents.size(); Idx != End; ++Idx)
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00001491 clusterNeighboringMemOps(StoreChainDependents[Idx], DAG);
Andrew Tricka7714a02012-11-12 19:40:10 +00001492}
1493
Andrew Trick02a80da2012-03-08 01:41:12 +00001494//===----------------------------------------------------------------------===//
Andrew Trick263280242012-11-12 19:52:20 +00001495// MacroFusion - DAG post-processing to encourage fusion of macro ops.
1496//===----------------------------------------------------------------------===//
1497
1498namespace {
1499/// \brief Post-process the DAG to create cluster edges between instructions
1500/// that may be fused by the processor into a single operation.
1501class MacroFusion : public ScheduleDAGMutation {
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001502 const TargetInstrInfo &TII;
1503 const TargetRegisterInfo &TRI;
Andrew Trick263280242012-11-12 19:52:20 +00001504public:
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001505 MacroFusion(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI)
1506 : TII(TII), TRI(TRI) {}
Andrew Trick263280242012-11-12 19:52:20 +00001507
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001508 void apply(ScheduleDAGInstrs *DAGInstrs) override;
Andrew Trick263280242012-11-12 19:52:20 +00001509};
Alexander Kornienkof00654e2015-06-23 09:49:53 +00001510} // anonymous
Andrew Trick263280242012-11-12 19:52:20 +00001511
Tom Stellard68726a52016-08-19 19:59:18 +00001512namespace llvm {
1513
1514std::unique_ptr<ScheduleDAGMutation>
1515createMacroFusionDAGMutation(const TargetInstrInfo *TII,
1516 const TargetRegisterInfo *TRI) {
1517 return make_unique<MacroFusion>(*TII, *TRI);
1518}
1519
1520} // namespace llvm
1521
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001522/// Returns true if \p MI reads a register written by \p Other.
1523static bool HasDataDep(const TargetRegisterInfo &TRI, const MachineInstr &MI,
1524 const MachineInstr &Other) {
1525 for (const MachineOperand &MO : MI.uses()) {
1526 if (!MO.isReg() || !MO.readsReg())
1527 continue;
1528
1529 unsigned Reg = MO.getReg();
1530 if (Other.modifiesRegister(Reg, &TRI))
1531 return true;
1532 }
1533 return false;
1534}
1535
Andrew Trick263280242012-11-12 19:52:20 +00001536/// \brief Callback from DAG postProcessing to create cluster edges to encourage
1537/// fused operations.
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001538void MacroFusion::apply(ScheduleDAGInstrs *DAGInstrs) {
1539 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
1540
Andrew Trick263280242012-11-12 19:52:20 +00001541 // For now, assume targets can only fuse with the branch.
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001542 SUnit &ExitSU = DAG->ExitSU;
1543 MachineInstr *Branch = ExitSU.getInstr();
Andrew Trick263280242012-11-12 19:52:20 +00001544 if (!Branch)
1545 return;
1546
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001547 for (SUnit &SU : DAG->SUnits) {
1548 // SUnits with successors can't be scheduled in front of the ExitSU.
1549 if (!SU.Succs.empty())
1550 continue;
1551 // We only care if the node writes to a register that the branch reads.
1552 MachineInstr *Pred = SU.getInstr();
1553 if (!HasDataDep(TRI, *Branch, *Pred))
1554 continue;
1555
Duncan P. N. Exon Smith9cfc75c2016-06-30 00:01:54 +00001556 if (!TII.shouldScheduleAdjacent(*Pred, *Branch))
Andrew Trick263280242012-11-12 19:52:20 +00001557 continue;
1558
1559 // Create a single weak edge from SU to ExitSU. The only effect is to cause
1560 // bottom-up scheduling to heavily prioritize the clustered SU. There is no
1561 // need to copy predecessor edges from ExitSU to SU, since top-down
1562 // scheduling cannot prioritize ExitSU anyway. To defer top-down scheduling
1563 // of SU, we could create an artificial edge from the deepest root, but it
1564 // hasn't been needed yet.
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001565 bool Success = DAG->addEdge(&ExitSU, SDep(&SU, SDep::Cluster));
Andrew Trick263280242012-11-12 19:52:20 +00001566 (void)Success;
1567 assert(Success && "No DAG nodes should be reachable from ExitSU");
1568
Matthias Braun2bd6dd82015-07-20 22:34:44 +00001569 DEBUG(dbgs() << "Macro Fuse SU(" << SU.NodeNum << ")\n");
Andrew Trick263280242012-11-12 19:52:20 +00001570 break;
1571 }
1572}
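// Example of the effect (target-dependent, driven by shouldScheduleAdjacent):
// on a target that fuses compare-and-branch, a CMP whose flags feed the
// terminating conditional branch receives a weak Cluster edge to ExitSU, so
// bottom-up scheduling strongly prefers to keep the CMP glued to the branch.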
1573
1574//===----------------------------------------------------------------------===//
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001575// CopyConstrain - DAG post-processing to encourage copy elimination.
1576//===----------------------------------------------------------------------===//
1577
1578namespace {
1579/// \brief Post-process the DAG to create weak edges from all uses of a copy to
1580/// the one use that defines the copy's source vreg, most likely an induction
1581/// variable increment.
1582class CopyConstrain : public ScheduleDAGMutation {
1583 // Transient state.
1584 SlotIndex RegionBeginIdx;
Andrew Trick2e875172013-04-24 23:19:56 +00001585 // RegionEndIdx is the slot index of the last non-debug instruction in the
1586 // scheduling region. So we may have RegionBeginIdx == RegionEndIdx.
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001587 SlotIndex RegionEndIdx;
1588public:
1589 CopyConstrain(const TargetInstrInfo *, const TargetRegisterInfo *) {}
1590
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001591 void apply(ScheduleDAGInstrs *DAGInstrs) override;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001592
1593protected:
Andrew Trickd7f890e2013-12-28 21:56:47 +00001594 void constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG);
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001595};
Alexander Kornienkof00654e2015-06-23 09:49:53 +00001596} // anonymous
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001597
Tom Stellard68726a52016-08-19 19:59:18 +00001598namespace llvm {
1599
1600std::unique_ptr<ScheduleDAGMutation>
1601createCopyConstrainDAGMutation(const TargetInstrInfo *TII,
1602 const TargetRegisterInfo *TRI) {
1603 return make_unique<CopyConstrain>(TII, TRI);
1604}
1605
1606} // namespace llvm
1607
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001608/// constrainLocalCopy handles two possibilities:
1609/// 1) Local src:
1610/// I0: = dst
1611/// I1: src = ...
1612/// I2: = dst
1613/// I3: dst = src (copy)
1614/// (create pred->succ edges I0->I1, I2->I1)
1615///
1616/// 2) Local copy:
1617/// I0: dst = src (copy)
1618/// I1: = dst
1619/// I2: src = ...
1620/// I3: = dst
1621/// (create pred->succ edges I1->I2, I3->I2)
1622///
1623/// Although the MachineScheduler is currently constrained to single blocks,
1624/// this algorithm should handle extended blocks. An EBB is a set of
1625/// contiguously numbered blocks such that the previous block in the EBB is
1626/// always the single predecessor.
Andrew Trickd7f890e2013-12-28 21:56:47 +00001627void CopyConstrain::constrainLocalCopy(SUnit *CopySU, ScheduleDAGMILive *DAG) {
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001628 LiveIntervals *LIS = DAG->getLIS();
1629 MachineInstr *Copy = CopySU->getInstr();
1630
1631 // Check for pure vreg copies.
Matthias Braun7511abd2016-04-04 21:23:46 +00001632 const MachineOperand &SrcOp = Copy->getOperand(1);
1633 unsigned SrcReg = SrcOp.getReg();
1634 if (!TargetRegisterInfo::isVirtualRegister(SrcReg) || !SrcOp.readsReg())
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001635 return;
1636
Matthias Braun7511abd2016-04-04 21:23:46 +00001637 const MachineOperand &DstOp = Copy->getOperand(0);
1638 unsigned DstReg = DstOp.getReg();
1639 if (!TargetRegisterInfo::isVirtualRegister(DstReg) || DstOp.isDead())
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001640 return;
1641
1642 // Check if either the dest or source is local. If it's live across a back
1643 // edge, it's not local. Note that if both vregs are live across the back
1644 // edge, we cannot successfully contrain the copy without cyclic scheduling.
Michael Kuperstein54c61ed2015-01-19 07:30:47 +00001645 // If both the copy's source and dest are local live intervals, then we
1646 // should treat the dest as the global for the purpose of adding
1647 // constraints. This adds edges from source's other uses to the copy.
1648 unsigned LocalReg = SrcReg;
1649 unsigned GlobalReg = DstReg;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001650 LiveInterval *LocalLI = &LIS->getInterval(LocalReg);
1651 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx)) {
Michael Kuperstein54c61ed2015-01-19 07:30:47 +00001652 LocalReg = DstReg;
1653 GlobalReg = SrcReg;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001654 LocalLI = &LIS->getInterval(LocalReg);
1655 if (!LocalLI->isLocal(RegionBeginIdx, RegionEndIdx))
1656 return;
1657 }
1658 LiveInterval *GlobalLI = &LIS->getInterval(GlobalReg);
1659
1660 // Find the global segment after the start of the local LI.
1661 LiveInterval::iterator GlobalSegment = GlobalLI->find(LocalLI->beginIndex());
1662 // If GlobalLI does not overlap LocalLI->start, then a copy directly feeds a
1663 // local live range. We could create edges from other global uses to the local
1664 // start, but the coalescer should have already eliminated these cases, so
1665 // don't bother dealing with it.
1666 if (GlobalSegment == GlobalLI->end())
1667 return;
1668
1669 // If GlobalSegment is killed at the LocalLI->start, the call to find()
1670 // returned the next global segment. But if GlobalSegment overlaps with
1671 // LocalLI->start, then advance to the next segment. If a hole in GlobalLI
1672 // exists in LocalLI's vicinity, GlobalSegment will be the end of the hole.
1673 if (GlobalSegment->contains(LocalLI->beginIndex()))
1674 ++GlobalSegment;
1675
1676 if (GlobalSegment == GlobalLI->end())
1677 return;
1678
1679 // Check if GlobalLI contains a hole in the vicinity of LocalLI.
1680 if (GlobalSegment != GlobalLI->begin()) {
1681 // Two address defs have no hole.
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00001682 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->end,
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001683 GlobalSegment->start)) {
1684 return;
1685 }
Andrew Trickd9761772013-07-30 19:59:08 +00001686 // If the prior global segment may be defined by the same two-address
1687 // instruction that also defines LocalLI, then can't make a hole here.
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00001688 if (SlotIndex::isSameInstr(std::prev(GlobalSegment)->start,
Andrew Trickd9761772013-07-30 19:59:08 +00001689 LocalLI->beginIndex())) {
1690 return;
1691 }
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001692 // If GlobalLI has a prior segment, it must be live into the EBB. Otherwise
1693 // it would be a disconnected component in the live range.
Benjamin Kramerb6d0bd42014-03-02 12:27:27 +00001694 assert(std::prev(GlobalSegment)->start < LocalLI->beginIndex() &&
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001695 "Disconnected LRG within the scheduling region.");
1696 }
1697 MachineInstr *GlobalDef = LIS->getInstructionFromIndex(GlobalSegment->start);
1698 if (!GlobalDef)
1699 return;
1700
1701 SUnit *GlobalSU = DAG->getSUnit(GlobalDef);
1702 if (!GlobalSU)
1703 return;
1704
1705 // GlobalDef is the bottom of the GlobalLI hole. Open the hole by
1706 // constraining the uses of the last local def to precede GlobalDef.
1707 SmallVector<SUnit*,8> LocalUses;
1708 const VNInfo *LastLocalVN = LocalLI->getVNInfoBefore(LocalLI->endIndex());
1709 MachineInstr *LastLocalDef = LIS->getInstructionFromIndex(LastLocalVN->def);
1710 SUnit *LastLocalSU = DAG->getSUnit(LastLocalDef);
1711 for (SUnit::const_succ_iterator
1712 I = LastLocalSU->Succs.begin(), E = LastLocalSU->Succs.end();
1713 I != E; ++I) {
1714 if (I->getKind() != SDep::Data || I->getReg() != LocalReg)
1715 continue;
1716 if (I->getSUnit() == GlobalSU)
1717 continue;
1718 if (!DAG->canAddEdge(GlobalSU, I->getSUnit()))
1719 return;
1720 LocalUses.push_back(I->getSUnit());
1721 }
1722 // Open the top of the GlobalLI hole by constraining any earlier global uses
1723 // to precede the start of LocalLI.
1724 SmallVector<SUnit*,8> GlobalUses;
1725 MachineInstr *FirstLocalDef =
1726 LIS->getInstructionFromIndex(LocalLI->beginIndex());
1727 SUnit *FirstLocalSU = DAG->getSUnit(FirstLocalDef);
1728 for (SUnit::const_pred_iterator
1729 I = GlobalSU->Preds.begin(), E = GlobalSU->Preds.end(); I != E; ++I) {
1730 if (I->getKind() != SDep::Anti || I->getReg() != GlobalReg)
1731 continue;
1732 if (I->getSUnit() == FirstLocalSU)
1733 continue;
1734 if (!DAG->canAddEdge(FirstLocalSU, I->getSUnit()))
1735 return;
1736 GlobalUses.push_back(I->getSUnit());
1737 }
1738 DEBUG(dbgs() << "Constraining copy SU(" << CopySU->NodeNum << ")\n");
1739 // Add the weak edges.
1740 for (SmallVectorImpl<SUnit*>::const_iterator
1741 I = LocalUses.begin(), E = LocalUses.end(); I != E; ++I) {
1742 DEBUG(dbgs() << " Local use SU(" << (*I)->NodeNum << ") -> SU("
1743 << GlobalSU->NodeNum << ")\n");
1744 DAG->addEdge(GlobalSU, SDep(*I, SDep::Weak));
1745 }
1746 for (SmallVectorImpl<SUnit*>::const_iterator
1747 I = GlobalUses.begin(), E = GlobalUses.end(); I != E; ++I) {
1748 DEBUG(dbgs() << " Global use SU(" << (*I)->NodeNum << ") -> SU("
1749 << FirstLocalSU->NodeNum << ")\n");
1750 DAG->addEdge(FirstLocalSU, SDep(*I, SDep::Weak));
1751 }
1752}
1753
1754/// \brief Callback from DAG postProcessing to create weak edges to encourage
1755/// copy elimination.
Krzysztof Parzyszek5c61d112016-03-05 15:45:23 +00001756void CopyConstrain::apply(ScheduleDAGInstrs *DAGInstrs) {
1757 ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);
Andrew Trickd7f890e2013-12-28 21:56:47 +00001758 assert(DAG->hasVRegLiveness() && "Expect VRegs with LiveIntervals");
1759
Andrew Trick2e875172013-04-24 23:19:56 +00001760 MachineBasicBlock::iterator FirstPos = nextIfDebug(DAG->begin(), DAG->end());
1761 if (FirstPos == DAG->end())
1762 return;
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001763 RegionBeginIdx = DAG->getLIS()->getInstructionIndex(*FirstPos);
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001764 RegionEndIdx = DAG->getLIS()->getInstructionIndex(
Duncan P. N. Exon Smith3ac9cc62016-02-27 06:40:41 +00001765 *priorNonDebug(DAG->end(), DAG->begin()));
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001766
1767 for (unsigned Idx = 0, End = DAG->SUnits.size(); Idx != End; ++Idx) {
1768 SUnit *SU = &DAG->SUnits[Idx];
1769 if (!SU->getInstr()->isCopy())
1770 continue;
1771
Andrew Trickd7f890e2013-12-28 21:56:47 +00001772 constrainLocalCopy(SU, static_cast<ScheduleDAGMILive*>(DAG));
Andrew Trick85a1d4c2013-04-24 15:54:43 +00001773 }
1774}
1775
1776//===----------------------------------------------------------------------===//
Andrew Trickfc127d12013-12-07 05:59:44 +00001777// MachineSchedStrategy helpers used by GenericScheduler, GenericPostScheduler
1778// and possibly other custom schedulers.
Andrew Trickd14d7c22013-12-28 21:56:57 +00001779//===----------------------------------------------------------------------===//
Andrew Tricke1c034f2012-01-17 06:55:03 +00001780
Andrew Trick5a22df42013-12-05 17:56:02 +00001781static const unsigned InvalidCycle = ~0U;
1782
Andrew Trickfc127d12013-12-07 05:59:44 +00001783SchedBoundary::~SchedBoundary() { delete HazardRec; }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001784
Andrew Trickfc127d12013-12-07 05:59:44 +00001785void SchedBoundary::reset() {
1786 // A new HazardRec is created for each DAG and owned by SchedBoundary.
1787 // Destroying and reconstructing it is very expensive though, so keep
1788 // invalid, placeholder HazardRecs around.
1789 if (HazardRec && HazardRec->isEnabled()) {
1790 delete HazardRec;
Craig Topperc0196b12014-04-14 00:51:57 +00001791 HazardRec = nullptr;
Andrew Trickfc127d12013-12-07 05:59:44 +00001792 }
1793 Available.clear();
1794 Pending.clear();
1795 CheckPending = false;
1796 NextSUs.clear();
1797 CurrCycle = 0;
1798 CurrMOps = 0;
1799 MinReadyCycle = UINT_MAX;
1800 ExpectedLatency = 0;
1801 DependentLatency = 0;
1802 RetiredMOps = 0;
1803 MaxExecutedResCount = 0;
1804 ZoneCritResIdx = 0;
1805 IsResourceLimited = false;
1806 ReservedCycles.clear();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001807#ifndef NDEBUG
Andrew Trickd14d7c22013-12-28 21:56:57 +00001808 // Track the maximum number of stall cycles that could arise either from the
1809 // latency of a DAG edge or the number of cycles that a processor resource is
1810 // reserved (SchedBoundary::ReservedCycles).
Andrew Trick7f1ebbe2014-06-07 01:48:43 +00001811 MaxObservedStall = 0;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001812#endif
Andrew Trickfc127d12013-12-07 05:59:44 +00001813 // Reserve a zero-count for invalid CritResIdx.
1814 ExecutedResCounts.resize(1);
1815 assert(!ExecutedResCounts[0] && "nonzero count for bad resource");
1816}
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001817
Andrew Trickfc127d12013-12-07 05:59:44 +00001818void SchedRemainder::
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001819init(ScheduleDAGMI *DAG, const TargetSchedModel *SchedModel) {
1820 reset();
1821 if (!SchedModel->hasInstrSchedModel())
1822 return;
1823 RemainingCounts.resize(SchedModel->getNumProcResourceKinds());
1824 for (std::vector<SUnit>::iterator
1825 I = DAG->SUnits.begin(), E = DAG->SUnits.end(); I != E; ++I) {
1826 const MCSchedClassDesc *SC = DAG->getSchedClass(&*I);
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001827 RemIssueCount += SchedModel->getNumMicroOps(I->getInstr(), SC)
1828 * SchedModel->getMicroOpFactor();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001829 for (TargetSchedModel::ProcResIter
1830 PI = SchedModel->getWriteProcResBegin(SC),
1831 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
1832 unsigned PIdx = PI->ProcResourceIdx;
1833 unsigned Factor = SchedModel->getResourceFactor(PIdx);
1834 RemainingCounts[PIdx] += (Factor * PI->Cycles);
1835 }
1836 }
1837}
1838
Andrew Trickfc127d12013-12-07 05:59:44 +00001839void SchedBoundary::
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001840init(ScheduleDAGMI *dag, const TargetSchedModel *smodel, SchedRemainder *rem) {
1841 reset();
1842 DAG = dag;
1843 SchedModel = smodel;
1844 Rem = rem;
Andrew Trick5a22df42013-12-05 17:56:02 +00001845 if (SchedModel->hasInstrSchedModel()) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001846 ExecutedResCounts.resize(SchedModel->getNumProcResourceKinds());
Andrew Trick5a22df42013-12-05 17:56:02 +00001847 ReservedCycles.resize(SchedModel->getNumProcResourceKinds(), InvalidCycle);
1848 }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001849}
1850
Andrew Trick880e5732013-12-05 17:55:58 +00001851/// Compute the stall cycles based on this SUnit's ready time. Heuristics treat
1852/// these "soft stalls" differently than the hard stall cycles based on CPU
1853/// resources and computed by checkHazard(). A fully in-order model
1854/// (MicroOpBufferSize==0) will not make use of this since instructions are not
1855/// available for scheduling until they are ready. However, a weaker in-order
1856/// model may use this for heuristics. For example, if a processor has in-order
1857/// behavior when reading certain resources, this may come into play.
Andrew Trickfc127d12013-12-07 05:59:44 +00001858unsigned SchedBoundary::getLatencyStallCycles(SUnit *SU) {
Andrew Trick880e5732013-12-05 17:55:58 +00001859 if (!SU->isUnbuffered)
1860 return 0;
1861
1862 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
1863 if (ReadyCycle > CurrCycle)
1864 return ReadyCycle - CurrCycle;
1865 return 0;
1866}
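// For example (hypothetical numbers): with CurrCycle == 5, an unbuffered SU
// whose ReadyCycle is 8 reports 3 soft-stall cycles, which heuristics may
// weigh against other candidates without treating it as a hard hazard.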
1867
Andrew Trick5a22df42013-12-05 17:56:02 +00001868/// Compute the next cycle at which the given processor resource can be
1869/// scheduled.
Andrew Trickfc127d12013-12-07 05:59:44 +00001870unsigned SchedBoundary::
Andrew Trick5a22df42013-12-05 17:56:02 +00001871getNextResourceCycle(unsigned PIdx, unsigned Cycles) {
1872 unsigned NextUnreserved = ReservedCycles[PIdx];
1873 // If this resource has never been used, always return cycle zero.
1874 if (NextUnreserved == InvalidCycle)
1875 return 0;
1876 // For bottom-up scheduling add the cycles needed for the current operation.
1877 if (!isTop())
1878 NextUnreserved += Cycles;
1879 return NextUnreserved;
1880}
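// For example (hypothetical state): if ReservedCycles[PIdx] == 7, a top-down
// query returns 7, while a bottom-up query with Cycles == 2 returns 9,
// accounting for the cycles the current operation itself would hold the
// resource.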
1881
Andrew Trick8c9e6722012-06-29 03:23:24 +00001882/// Does this SU have a hazard within the current instruction group.
1883///
1884/// The scheduler supports two modes of hazard recognition. The first is the
1885/// ScheduleHazardRecognizer API. It is a fully general hazard recognizer that
1886/// supports highly complicated in-order reservation tables
1887/// (ScoreboardHazardRecognizer) and arbitrary target-specific logic.
1888///
1889/// The second is a streamlined mechanism that checks for hazards based on
1890/// simple counters that the scheduler itself maintains. It explicitly checks
1891/// for instruction dispatch limitations, including the number of micro-ops that
1892/// can dispatch per cycle.
1893///
1894/// TODO: Also check whether the SU must start a new group.
Andrew Trickfc127d12013-12-07 05:59:44 +00001895bool SchedBoundary::checkHazard(SUnit *SU) {
Andrew Trickd14d7c22013-12-28 21:56:57 +00001896 if (HazardRec->isEnabled()
1897 && HazardRec->getHazardType(SU) != ScheduleHazardRecognizer::NoHazard) {
1898 return true;
1899 }
Andrew Trickdd79f0f2012-10-10 05:43:09 +00001900 unsigned uops = SchedModel->getNumMicroOps(SU->getInstr());
Andrew Tricke2ff5752013-06-15 04:49:49 +00001901 if ((CurrMOps > 0) && (CurrMOps + uops > SchedModel->getIssueWidth())) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001902 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") uops="
1903 << SchedModel->getNumMicroOps(SU->getInstr()) << '\n');
Andrew Trick8c9e6722012-06-29 03:23:24 +00001904 return true;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001905 }
Andrew Trick5a22df42013-12-05 17:56:02 +00001906 if (SchedModel->hasInstrSchedModel() && SU->hasReservedResource) {
1907 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
1908 for (TargetSchedModel::ProcResIter
1909 PI = SchedModel->getWriteProcResBegin(SC),
1910 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
Andrew Trick56327222014-06-27 04:57:05 +00001911 unsigned NRCycle = getNextResourceCycle(PI->ProcResourceIdx, PI->Cycles);
1912 if (NRCycle > CurrCycle) {
Andrew Trick040c0da2014-06-27 05:09:36 +00001913#ifndef NDEBUG
Chad Rosieraba845e2014-07-02 16:46:08 +00001914 MaxObservedStall = std::max(PI->Cycles, MaxObservedStall);
Andrew Trick040c0da2014-06-27 05:09:36 +00001915#endif
Andrew Trick56327222014-06-27 04:57:05 +00001916 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") "
1917 << SchedModel->getResourceName(PI->ProcResourceIdx)
1918 << "=" << NRCycle << "c\n");
Andrew Trick5a22df42013-12-05 17:56:02 +00001919 return true;
Andrew Trick56327222014-06-27 04:57:05 +00001920 }
Andrew Trick5a22df42013-12-05 17:56:02 +00001921 }
1922 }
Andrew Trick8c9e6722012-06-29 03:23:24 +00001923 return false;
1924}
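// Worked example (assumed IssueWidth == 4): with CurrMOps == 3, a candidate
// SU that decodes to 2 micro-ops overflows the current group (3 + 2 > 4), so
// checkHazard returns true and releaseNode parks the SU in Pending until the
// cycle advances.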
1925
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001926// Find the unscheduled node in ReadySUs with the highest latency.
Andrew Trickfc127d12013-12-07 05:59:44 +00001927unsigned SchedBoundary::
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001928findMaxLatency(ArrayRef<SUnit*> ReadySUs) {
Craig Topperc0196b12014-04-14 00:51:57 +00001929 SUnit *LateSU = nullptr;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001930 unsigned RemLatency = 0;
1931 for (ArrayRef<SUnit*>::iterator I = ReadySUs.begin(), E = ReadySUs.end();
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001932 I != E; ++I) {
1933 unsigned L = getUnscheduledLatency(*I);
Andrew Trickf5b8ef22013-06-15 04:49:44 +00001934 if (L > RemLatency) {
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001935 RemLatency = L;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001936 LateSU = *I;
Andrew Trickf5b8ef22013-06-15 04:49:44 +00001937 }
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001938 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001939 if (LateSU) {
1940 DEBUG(dbgs() << Available.getName() << " RemLatency SU("
1941 << LateSU->NodeNum << ") " << RemLatency << "c\n");
Andrew Trickd6d5ad32012-12-18 20:52:56 +00001942 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001943 return RemLatency;
1944}
Andrew Trickf5b8ef22013-06-15 04:49:44 +00001945
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001946// Count resources in this zone and the remaining unscheduled
1947// instruction. Return the max count, scaled. Set OtherCritIdx to the critical
1948// resource index, or zero if the zone is issue limited.
Andrew Trickfc127d12013-12-07 05:59:44 +00001949unsigned SchedBoundary::
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001950getOtherResourceCount(unsigned &OtherCritIdx) {
Alexey Samsonov64c391d2013-07-19 08:55:18 +00001951 OtherCritIdx = 0;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001952 if (!SchedModel->hasInstrSchedModel())
1953 return 0;
1954
1955 unsigned OtherCritCount = Rem->RemIssueCount
1956 + (RetiredMOps * SchedModel->getMicroOpFactor());
1957 DEBUG(dbgs() << " " << Available.getName() << " + Remain MOps: "
1958 << OtherCritCount / SchedModel->getMicroOpFactor() << '\n');
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001959 for (unsigned PIdx = 1, PEnd = SchedModel->getNumProcResourceKinds();
1960 PIdx != PEnd; ++PIdx) {
1961 unsigned OtherCount = getResourceCount(PIdx) + Rem->RemainingCounts[PIdx];
1962 if (OtherCount > OtherCritCount) {
1963 OtherCritCount = OtherCount;
1964 OtherCritIdx = PIdx;
1965 }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001966 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001967 if (OtherCritIdx) {
1968 DEBUG(dbgs() << " " << Available.getName() << " + Remain CritRes: "
1969 << OtherCritCount / SchedModel->getResourceFactor(OtherCritIdx)
Andrew Trickfc127d12013-12-07 05:59:44 +00001970 << " " << SchedModel->getResourceName(OtherCritIdx) << "\n");
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001971 }
1972 return OtherCritCount;
1973}
1974
Andrew Trickfc127d12013-12-07 05:59:44 +00001975void SchedBoundary::releaseNode(SUnit *SU, unsigned ReadyCycle) {
Andrew Trick7f1ebbe2014-06-07 01:48:43 +00001976 assert(SU->getInstr() && "Scheduled SUnit must have instr");
1977
1978#ifndef NDEBUG
Andrew Trick491e34a2014-06-12 22:36:28 +00001979 // ReadyCycle was bumped up to CurrCycle when this node was
1980 // scheduled, but CurrCycle may have been eagerly advanced immediately after
1981 // scheduling, so it may now be greater than ReadyCycle.
1982 if (ReadyCycle > CurrCycle)
1983 MaxObservedStall = std::max(ReadyCycle - CurrCycle, MaxObservedStall);
Andrew Trick7f1ebbe2014-06-07 01:48:43 +00001984#endif
1985
Andrew Trick61f1a272012-05-24 22:11:09 +00001986 if (ReadyCycle < MinReadyCycle)
1987 MinReadyCycle = ReadyCycle;
1988
1989 // Check for interlocks first. For the purpose of other heuristics, an
1990 // instruction that cannot issue appears as if it's not in the ReadyQueue.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00001991 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
Matthias Braun6493bc22016-04-22 19:09:17 +00001992 if ((!IsBuffered && ReadyCycle > CurrCycle) || checkHazard(SU) ||
1993 Available.size() >= ReadyListLimit)
Andrew Trick61f1a272012-05-24 22:11:09 +00001994 Pending.push(SU);
1995 else
1996 Available.push(SU);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00001997
1998 // Record this node as an immediate dependent of the scheduled node.
1999 NextSUs.insert(SU);
Andrew Trick61f1a272012-05-24 22:11:09 +00002000}
2001
Andrew Trickfc127d12013-12-07 05:59:44 +00002002void SchedBoundary::releaseTopNode(SUnit *SU) {
2003 if (SU->isScheduled)
2004 return;
2005
Andrew Trickfc127d12013-12-07 05:59:44 +00002006 releaseNode(SU, SU->TopReadyCycle);
2007}
2008
2009void SchedBoundary::releaseBottomNode(SUnit *SU) {
2010 if (SU->isScheduled)
2011 return;
2012
Andrew Trickfc127d12013-12-07 05:59:44 +00002013 releaseNode(SU, SU->BotReadyCycle);
2014}
2015
Andrew Trick61f1a272012-05-24 22:11:09 +00002016/// Move the boundary of scheduled code by one cycle.
Andrew Trickfc127d12013-12-07 05:59:44 +00002017void SchedBoundary::bumpCycle(unsigned NextCycle) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002018 if (SchedModel->getMicroOpBufferSize() == 0) {
2019 assert(MinReadyCycle < UINT_MAX && "MinReadyCycle uninitialized");
2020 if (MinReadyCycle > NextCycle)
2021 NextCycle = MinReadyCycle;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002022 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002023 // Update the current micro-ops, which will issue in the next cycle.
2024 unsigned DecMOps = SchedModel->getIssueWidth() * (NextCycle - CurrCycle);
2025 CurrMOps = (CurrMOps <= DecMOps) ? 0 : CurrMOps - DecMOps;
2026
2027 // Decrement DependentLatency based on the next cycle.
Andrew Trickf5b8ef22013-06-15 04:49:44 +00002028 if ((NextCycle - CurrCycle) > DependentLatency)
2029 DependentLatency = 0;
2030 else
2031 DependentLatency -= (NextCycle - CurrCycle);
Andrew Trick61f1a272012-05-24 22:11:09 +00002032
2033 if (!HazardRec->isEnabled()) {
Andrew Trick45446062012-06-05 21:11:27 +00002034 // Bypass HazardRec virtual calls.
Andrew Trick61f1a272012-05-24 22:11:09 +00002035 CurrCycle = NextCycle;
Matthias Braunb550b762016-04-21 01:54:13 +00002036 } else {
Andrew Trick45446062012-06-05 21:11:27 +00002037 // Bypass getHazardType calls in case of long latency.
Andrew Trick61f1a272012-05-24 22:11:09 +00002038 for (; CurrCycle != NextCycle; ++CurrCycle) {
2039 if (isTop())
2040 HazardRec->AdvanceCycle();
2041 else
2042 HazardRec->RecedeCycle();
2043 }
2044 }
2045 CheckPending = true;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002046 unsigned LFactor = SchedModel->getLatencyFactor();
2047 IsResourceLimited =
2048 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2049 > (int)LFactor;
Andrew Trick61f1a272012-05-24 22:11:09 +00002050
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002051 DEBUG(dbgs() << "Cycle: " << CurrCycle << ' ' << Available.getName() << '\n');
2052}
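// Worked example of the resource-limited test (hypothetical numbers): with
// LatencyFactor == 2, a scheduled latency of 10 cycles scales to 20 units.
// A critical resource count of 25 units gives 25 - 20 == 5 > 2, so the zone
// is classified as resource limited rather than latency limited.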
2053
Andrew Trickfc127d12013-12-07 05:59:44 +00002054void SchedBoundary::incExecutedResources(unsigned PIdx, unsigned Count) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002055 ExecutedResCounts[PIdx] += Count;
2056 if (ExecutedResCounts[PIdx] > MaxExecutedResCount)
2057 MaxExecutedResCount = ExecutedResCounts[PIdx];
Andrew Trick61f1a272012-05-24 22:11:09 +00002058}
2059
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002060/// Add the given processor resource to this scheduled zone.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002061///
2062/// \param Cycles indicates the number of consecutive (non-pipelined) cycles
2063/// during which this resource is consumed.
2064///
2065/// \return the next cycle at which the instruction may execute without
2066/// oversubscribing resources.
Andrew Trickfc127d12013-12-07 05:59:44 +00002067unsigned SchedBoundary::
Andrew Trick5a22df42013-12-05 17:56:02 +00002068countResource(unsigned PIdx, unsigned Cycles, unsigned NextCycle) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002069 unsigned Factor = SchedModel->getResourceFactor(PIdx);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002070 unsigned Count = Factor * Cycles;
Andrew Trickfc127d12013-12-07 05:59:44 +00002071 DEBUG(dbgs() << " " << SchedModel->getResourceName(PIdx)
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002072 << " +" << Cycles << "x" << Factor << "u\n");
2073
2074 // Update Executed resources counts.
2075 incExecutedResources(PIdx, Count);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002076 assert(Rem->RemainingCounts[PIdx] >= Count && "resource double counted");
2077 Rem->RemainingCounts[PIdx] -= Count;
2078
Andrew Trickb13ef172013-07-19 00:20:07 +00002079 // Check if this resource exceeds the current critical resource. If so, it
2080 // becomes the critical resource.
2081 if (ZoneCritResIdx != PIdx && (getResourceCount(PIdx) > getCriticalCount())) {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002082 ZoneCritResIdx = PIdx;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002083 DEBUG(dbgs() << " *** Critical resource "
Andrew Trickfc127d12013-12-07 05:59:44 +00002084 << SchedModel->getResourceName(PIdx) << ": "
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002085 << getResourceCount(PIdx) / SchedModel->getLatencyFactor() << "c\n");
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002086 }
Andrew Trick5a22df42013-12-05 17:56:02 +00002087 // For reserved resources, record the highest cycle using the resource.
2088 unsigned NextAvailable = getNextResourceCycle(PIdx, Cycles);
2089 if (NextAvailable > CurrCycle) {
2090 DEBUG(dbgs() << " Resource conflict: "
2091 << SchedModel->getProcResource(PIdx)->Name << " reserved until @"
2092 << NextAvailable << "\n");
2093 }
2094 return NextAvailable;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002095}
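// Worked example of the scaling (hypothetical model: IssueWidth == 2, a
// single-unit resource A and a dual-unit resource B): the cycle LCM is 2, so
// A scales by factor 2 and B by factor 1. An instruction holding A for 3
// cycles therefore adds 3 * 2 == 6 units, directly comparable against
// getCriticalCount() and the micro-op count.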
2096
Andrew Trick45446062012-06-05 21:11:27 +00002097/// Move the boundary of scheduled code by one SUnit.
Andrew Trickfc127d12013-12-07 05:59:44 +00002098void SchedBoundary::bumpNode(SUnit *SU) {
Andrew Trick45446062012-06-05 21:11:27 +00002099 // Update the reservation table.
2100 if (HazardRec->isEnabled()) {
2101 if (!isTop() && SU->isCall) {
2102 // Calls are scheduled with their preceding instructions. For bottom-up
2103 // scheduling, clear the pipeline state before emitting.
2104 HazardRec->Reset();
2105 }
2106 HazardRec->EmitInstruction(SU);
2107 }
Andrew Trick5a22df42013-12-05 17:56:02 +00002108 // checkHazard should prevent scheduling multiple instructions per cycle that
2109 // exceed the issue width.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002110 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2111 unsigned IncMOps = SchedModel->getNumMicroOps(SU->getInstr());
Daniel Jasper0d92abd2013-12-06 08:58:22 +00002112 assert(
2113 (CurrMOps == 0 || (CurrMOps + IncMOps) <= SchedModel->getIssueWidth()) &&
Andrew Trickf7760a22013-12-06 17:19:20 +00002114 "Cannot schedule this instruction's MicroOps in the current cycle.");
Andrew Trick5a22df42013-12-05 17:56:02 +00002115
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002116 unsigned ReadyCycle = (isTop() ? SU->TopReadyCycle : SU->BotReadyCycle);
2117 DEBUG(dbgs() << " Ready @" << ReadyCycle << "c\n");
2118
Andrew Trick5a22df42013-12-05 17:56:02 +00002119 unsigned NextCycle = CurrCycle;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002120 switch (SchedModel->getMicroOpBufferSize()) {
2121 case 0:
2122 assert(ReadyCycle <= CurrCycle && "Broken PendingQueue");
2123 break;
2124 case 1:
2125 if (ReadyCycle > NextCycle) {
2126 NextCycle = ReadyCycle;
2127 DEBUG(dbgs() << " *** Stall until: " << ReadyCycle << "\n");
2128 }
2129 break;
2130 default:
2131 // We don't currently model the OOO reorder buffer, so consider all
Andrew Trick880e5732013-12-05 17:55:58 +00002132 // scheduled MOps to be "retired". We do loosely model in-order resource
2133 // latency. If this instruction uses an in-order resource, account for any
2134 // likely stall cycles.
2135 if (SU->isUnbuffered && ReadyCycle > NextCycle)
2136 NextCycle = ReadyCycle;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002137 break;
2138 }
2139 RetiredMOps += IncMOps;
2140
2141 // Update resource counts and critical resource.
2142 if (SchedModel->hasInstrSchedModel()) {
2143 unsigned DecRemIssue = IncMOps * SchedModel->getMicroOpFactor();
2144 assert(Rem->RemIssueCount >= DecRemIssue && "MOps double counted");
2145 Rem->RemIssueCount -= DecRemIssue;
2146 if (ZoneCritResIdx) {
2147 // Scale scheduled micro-ops for comparing with the critical resource.
2148 unsigned ScaledMOps =
2149 RetiredMOps * SchedModel->getMicroOpFactor();
2150
2151 // If scaled micro-ops are now more than the previous critical resource by
2152 // a full cycle, then micro-ops issue becomes critical.
2153 if ((int)(ScaledMOps - getResourceCount(ZoneCritResIdx))
2154 >= (int)SchedModel->getLatencyFactor()) {
2155 ZoneCritResIdx = 0;
2156 DEBUG(dbgs() << " *** Critical resource NumMicroOps: "
2157 << ScaledMOps / SchedModel->getLatencyFactor() << "c\n");
2158 }
2159 }
2160 for (TargetSchedModel::ProcResIter
2161 PI = SchedModel->getWriteProcResBegin(SC),
2162 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2163 unsigned RCycle =
Andrew Trick5a22df42013-12-05 17:56:02 +00002164 countResource(PI->ProcResourceIdx, PI->Cycles, NextCycle);
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002165 if (RCycle > NextCycle)
2166 NextCycle = RCycle;
2167 }
Andrew Trick5a22df42013-12-05 17:56:02 +00002168 if (SU->hasReservedResource) {
2169 // For reserved resources, record the highest cycle using the resource.
2170 // For top-down scheduling, this is the cycle in which we schedule this
2171  // instruction plus the number of cycles the operation reserves the
2172  // resource. For bottom-up scheduling, it is simply the instruction's cycle.
2173 for (TargetSchedModel::ProcResIter
2174 PI = SchedModel->getWriteProcResBegin(SC),
2175 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2176 unsigned PIdx = PI->ProcResourceIdx;
Andrew Trickd14d7c22013-12-28 21:56:57 +00002177 if (SchedModel->getProcResource(PIdx)->BufferSize == 0) {
Chad Rosieraba845e2014-07-02 16:46:08 +00002178 if (isTop()) {
2179 ReservedCycles[PIdx] =
2180 std::max(getNextResourceCycle(PIdx, 0), NextCycle + PI->Cycles);
2181 }
2182 else
2183 ReservedCycles[PIdx] = NextCycle;
Andrew Trickd14d7c22013-12-28 21:56:57 +00002184 }
Andrew Trick5a22df42013-12-05 17:56:02 +00002185 }
2186 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002187 }
2188 // Update ExpectedLatency and DependentLatency.
2189 unsigned &TopLatency = isTop() ? ExpectedLatency : DependentLatency;
2190 unsigned &BotLatency = isTop() ? DependentLatency : ExpectedLatency;
2191 if (SU->getDepth() > TopLatency) {
2192 TopLatency = SU->getDepth();
2193 DEBUG(dbgs() << " " << Available.getName()
2194 << " TopLatency SU(" << SU->NodeNum << ") " << TopLatency << "c\n");
2195 }
2196 if (SU->getHeight() > BotLatency) {
2197 BotLatency = SU->getHeight();
2198 DEBUG(dbgs() << " " << Available.getName()
2199 << " BotLatency SU(" << SU->NodeNum << ") " << BotLatency << "c\n");
2200 }
2201 // If we stall for any reason, bump the cycle.
2202 if (NextCycle > CurrCycle) {
2203 bumpCycle(NextCycle);
Matthias Braunb550b762016-04-21 01:54:13 +00002204 } else {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002205 // After updating ZoneCritResIdx and ExpectedLatency, check if we're
Alp Tokercb402912014-01-24 17:20:08 +00002206 // resource limited. If a stall occurred, bumpCycle does this.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002207 unsigned LFactor = SchedModel->getLatencyFactor();
2208 IsResourceLimited =
2209 (int)(getCriticalCount() - (getScheduledLatency() * LFactor))
2210 > (int)LFactor;
2211 }
Andrew Trick5a22df42013-12-05 17:56:02 +00002212 // Update CurrMOps after calling bumpCycle to handle stalls, since bumpCycle
2213 // resets CurrMOps. Loop to handle instructions with more MOps than issue in
2214 // one cycle. Since we commonly reach the max MOps here, opportunistically
2215 // bump the cycle to avoid uselessly checking everything in the readyQ.
2216 CurrMOps += IncMOps;
2217 while (CurrMOps >= SchedModel->getIssueWidth()) {
Andrew Trick5a22df42013-12-05 17:56:02 +00002218 DEBUG(dbgs() << " *** Max MOps " << CurrMOps
2219 << " at cycle " << CurrCycle << '\n');
Andrew Trickd14d7c22013-12-28 21:56:57 +00002220 bumpCycle(++NextCycle);
Andrew Trick5a22df42013-12-05 17:56:02 +00002221 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002222 DEBUG(dumpScheduledState());
Andrew Trick45446062012-06-05 21:11:27 +00002223}
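
// A worked example of the micro-op bookkeeping above (hypothetical numbers):
// with IssueWidth = 2, an instruction worth IncMOps = 3 may only issue at
// CurrMOps = 0 (per the assert at the top of bumpNode), after which
// CurrMOps = 3 >= 2 and the loop bumps the cycle until the in-flight count
// again drops below the issue width; this is the "more MOps than issue in
// one cycle" case called out above.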
2224
Andrew Trick61f1a272012-05-24 22:11:09 +00002225/// Release pending ready nodes into the available queue. This makes them
2226/// visible to heuristics.
Andrew Trickfc127d12013-12-07 05:59:44 +00002227void SchedBoundary::releasePending() {
Andrew Trick61f1a272012-05-24 22:11:09 +00002228 // If the available queue is empty, it is safe to reset MinReadyCycle.
2229 if (Available.empty())
2230 MinReadyCycle = UINT_MAX;
2231
2232 // Check to see if any of the pending instructions are ready to issue. If
2233 // so, add them to the available queue.
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002234 bool IsBuffered = SchedModel->getMicroOpBufferSize() != 0;
Andrew Trick61f1a272012-05-24 22:11:09 +00002235 for (unsigned i = 0, e = Pending.size(); i != e; ++i) {
2236 SUnit *SU = *(Pending.begin()+i);
Andrew Trick45446062012-06-05 21:11:27 +00002237 unsigned ReadyCycle = isTop() ? SU->TopReadyCycle : SU->BotReadyCycle;
Andrew Trick61f1a272012-05-24 22:11:09 +00002238
2239 if (ReadyCycle < MinReadyCycle)
2240 MinReadyCycle = ReadyCycle;
2241
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002242 if (!IsBuffered && ReadyCycle > CurrCycle)
Andrew Trick61f1a272012-05-24 22:11:09 +00002243 continue;
2244
Andrew Trick8c9e6722012-06-29 03:23:24 +00002245 if (checkHazard(SU))
Andrew Trick61f1a272012-05-24 22:11:09 +00002246 continue;
2247
Matthias Braun6493bc22016-04-22 19:09:17 +00002248 if (Available.size() >= ReadyListLimit)
2249 break;
2250
Andrew Trick61f1a272012-05-24 22:11:09 +00002251 Available.push(SU);
2252 Pending.remove(Pending.begin()+i);
2253 --i; --e;
2254 }
2255 CheckPending = false;
2256}
2257
2258/// Remove SU from the ready set for this boundary.
Andrew Trickfc127d12013-12-07 05:59:44 +00002259void SchedBoundary::removeReady(SUnit *SU) {
Andrew Trick61f1a272012-05-24 22:11:09 +00002260 if (Available.isInQueue(SU))
2261 Available.remove(Available.find(SU));
2262 else {
2263 assert(Pending.isInQueue(SU) && "bad ready count");
2264 Pending.remove(Pending.find(SU));
2265 }
2266}
2267
2268/// If this queue only has one ready candidate, return it. As a side effect,
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002269/// defer any nodes that now hit a hazard, and advance the cycle until at least
2270/// one node is ready. If multiple instructions are ready, return nullptr.
Andrew Trickfc127d12013-12-07 05:59:44 +00002271SUnit *SchedBoundary::pickOnlyChoice() {
Andrew Trick61f1a272012-05-24 22:11:09 +00002272 if (CheckPending)
2273 releasePending();
2274
Andrew Tricke2ff5752013-06-15 04:49:49 +00002275 if (CurrMOps > 0) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002276 // Defer any ready instrs that now have a hazard.
2277 for (ReadyQueue::iterator I = Available.begin(); I != Available.end();) {
2278 if (checkHazard(*I)) {
2279 Pending.push(*I);
2280 I = Available.remove(I);
2281 continue;
2282 }
2283 ++I;
2284 }
2285 }
Andrew Trick61f1a272012-05-24 22:11:09 +00002286 for (unsigned i = 0; Available.empty(); ++i) {
Chad Rosieraba845e2014-07-02 16:46:08 +00002287// FIXME: Re-enable assert once PR20057 is resolved.
2288// assert(i <= (HazardRec->getMaxLookAhead() + MaxObservedStall) &&
2289// "permanent hazard");
2290 (void)i;
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002291 bumpCycle(CurrCycle + 1);
Andrew Trick61f1a272012-05-24 22:11:09 +00002292 releasePending();
2293 }
Matthias Braund29d31e2016-06-23 21:27:38 +00002294
2295 DEBUG(Pending.dump());
2296 DEBUG(Available.dump());
2297
Andrew Trick61f1a272012-05-24 22:11:09 +00002298 if (Available.size() == 1)
2299 return *Available.begin();
Craig Topperc0196b12014-04-14 00:51:57 +00002300 return nullptr;
Andrew Trick61f1a272012-05-24 22:11:09 +00002301}
2302
Andrew Trick8e8415f2013-06-15 05:46:47 +00002303#ifndef NDEBUG
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002304// This is useful information to dump after bumpNode.
2305// Note that the Queue contents are more useful before pickNodeFromQueue.
Andrew Trickfc127d12013-12-07 05:59:44 +00002306void SchedBoundary::dumpScheduledState() {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002307 unsigned ResFactor;
2308 unsigned ResCount;
2309 if (ZoneCritResIdx) {
2310 ResFactor = SchedModel->getResourceFactor(ZoneCritResIdx);
2311 ResCount = getResourceCount(ZoneCritResIdx);
Matthias Braunb550b762016-04-21 01:54:13 +00002312 } else {
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002313 ResFactor = SchedModel->getMicroOpFactor();
2314 ResCount = RetiredMOps * SchedModel->getMicroOpFactor();
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002315 }
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002316 unsigned LFactor = SchedModel->getLatencyFactor();
2317 dbgs() << Available.getName() << " @" << CurrCycle << "c\n"
2318 << " Retired: " << RetiredMOps;
2319 dbgs() << "\n Executed: " << getExecutedCount() / LFactor << "c";
2320 dbgs() << "\n Critical: " << ResCount / LFactor << "c, "
Andrew Trickfc127d12013-12-07 05:59:44 +00002321 << ResCount / ResFactor << " "
2322 << SchedModel->getResourceName(ZoneCritResIdx)
Andrew Trickf78e7fa2013-06-15 05:39:19 +00002323 << "\n ExpectedLatency: " << ExpectedLatency << "c\n"
2324 << (IsResourceLimited ? " - Resource" : " - Latency")
2325 << " limited.\n";
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002326}
Andrew Trick8e8415f2013-06-15 05:46:47 +00002327#endif
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002328
Andrew Trickfc127d12013-12-07 05:59:44 +00002329//===----------------------------------------------------------------------===//
Andrew Trickd14d7c22013-12-28 21:56:57 +00002330// GenericScheduler - Generic implementation of MachineSchedStrategy.
Andrew Trickfc127d12013-12-07 05:59:44 +00002331//===----------------------------------------------------------------------===//
2332
Andrew Trickd14d7c22013-12-28 21:56:57 +00002333void GenericSchedulerBase::SchedCandidate::
2334initResourceDelta(const ScheduleDAGMI *DAG,
2335 const TargetSchedModel *SchedModel) {
2336 if (!Policy.ReduceResIdx && !Policy.DemandResIdx)
2337 return;
2338
2339 const MCSchedClassDesc *SC = DAG->getSchedClass(SU);
2340 for (TargetSchedModel::ProcResIter
2341 PI = SchedModel->getWriteProcResBegin(SC),
2342 PE = SchedModel->getWriteProcResEnd(SC); PI != PE; ++PI) {
2343 if (PI->ProcResourceIdx == Policy.ReduceResIdx)
2344 ResDelta.CritResources += PI->Cycles;
2345 if (PI->ProcResourceIdx == Policy.DemandResIdx)
2346 ResDelta.DemandedResources += PI->Cycles;
2347 }
2348}
2349
2350/// Set the CandPolicy for a scheduling zone, given the current resources and
2351/// latencies inside and outside the zone.
Matthias Braunb550b762016-04-21 01:54:13 +00002352void GenericSchedulerBase::setPolicy(CandPolicy &Policy, bool IsPostRA,
Andrew Trickd14d7c22013-12-28 21:56:57 +00002353 SchedBoundary &CurrZone,
2354 SchedBoundary *OtherZone) {
Eric Christopher572e03a2015-06-19 01:53:21 +00002355 // Apply preemptive heuristics based on the total latency and resources
Andrew Trickd14d7c22013-12-28 21:56:57 +00002356 // inside and outside this zone. Potential stalls should be considered before
2357 // following this policy.
2358
2359 // Compute remaining latency. We need this both to determine whether the
2360 // overall schedule has become latency-limited and whether the instructions
2361 // outside this zone are resource or latency limited.
2362 //
2363 // The "dependent" latency is updated incrementally during scheduling as the
2364 // max height/depth of scheduled nodes minus the cycles since it was
2365 // scheduled:
2366///   DLat = max (N.depth - (CurrCycle - N.ReadyCycle)) for N in Zone
2367 //
2368 // The "independent" latency is the max ready queue depth:
2369 // ILat = max N.depth for N in Available|Pending
2370 //
2371 // RemainingLatency is the greater of independent and dependent latency.
2372 unsigned RemLatency = CurrZone.getDependentLatency();
2373 RemLatency = std::max(RemLatency,
2374 CurrZone.findMaxLatency(CurrZone.Available.elements()));
2375 RemLatency = std::max(RemLatency,
2376 CurrZone.findMaxLatency(CurrZone.Pending.elements()));
2377
2378 // Compute the critical resource outside the zone.
Andrew Trick7afe4812013-12-28 22:25:57 +00002379 unsigned OtherCritIdx = 0;
Andrew Trickd14d7c22013-12-28 21:56:57 +00002380 unsigned OtherCount =
2381 OtherZone ? OtherZone->getOtherResourceCount(OtherCritIdx) : 0;
2382
2383 bool OtherResLimited = false;
2384 if (SchedModel->hasInstrSchedModel()) {
2385 unsigned LFactor = SchedModel->getLatencyFactor();
2386 OtherResLimited = (int)(OtherCount - (RemLatency * LFactor)) > (int)LFactor;
2387 }
2388 // Schedule aggressively for latency in PostRA mode. We don't check for
2389 // acyclic latency during PostRA, and highly out-of-order processors will
2390 // skip PostRA scheduling.
2391 if (!OtherResLimited) {
2392 if (IsPostRA || (RemLatency + CurrZone.getCurrCycle() > Rem.CriticalPath)) {
2393 Policy.ReduceLatency |= true;
2394 DEBUG(dbgs() << " " << CurrZone.Available.getName()
2395 << " RemainingLatency " << RemLatency << " + "
2396 << CurrZone.getCurrCycle() << "c > CritPath "
2397 << Rem.CriticalPath << "\n");
2398 }
2399 }
2400 // If the same resource is limiting inside and outside the zone, do nothing.
2401 if (CurrZone.getZoneCritResIdx() == OtherCritIdx)
2402 return;
2403
2404 DEBUG(
2405 if (CurrZone.isResourceLimited()) {
2406 dbgs() << " " << CurrZone.Available.getName() << " ResourceLimited: "
2407 << SchedModel->getResourceName(CurrZone.getZoneCritResIdx())
2408 << "\n";
2409 }
2410 if (OtherResLimited)
2411 dbgs() << " RemainingLimit: "
2412 << SchedModel->getResourceName(OtherCritIdx) << "\n";
2413 if (!CurrZone.isResourceLimited() && !OtherResLimited)
2414 dbgs() << " Latency limited both directions.\n");
2415
2416 if (CurrZone.isResourceLimited() && !Policy.ReduceResIdx)
2417 Policy.ReduceResIdx = CurrZone.getZoneCritResIdx();
2418
2419 if (OtherResLimited)
2420 Policy.DemandResIdx = OtherCritIdx;
2421}
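
// A worked example of the latency test above (hypothetical numbers): with
// Rem.CriticalPath = 20, CurrZone.getCurrCycle() = 8, and RemLatency = 15,
// 15 + 8 = 23 exceeds the critical path, so ReduceLatency is set and the
// zone begins favoring latency; had the remaining latency fit within the
// critical path, the resource heuristics would drive the policy instead.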
2422
2423#ifndef NDEBUG
2424const char *GenericSchedulerBase::getReasonStr(
2425 GenericSchedulerBase::CandReason Reason) {
2426 switch (Reason) {
2427 case NoCand: return "NOCAND ";
Matthias Braun49cb6e92016-05-27 22:14:26 +00002428 case Only1: return "ONLY1 ";
2429 case PhysRegCopy: return "PREG-COPY ";
Andrew Trickd14d7c22013-12-28 21:56:57 +00002430 case RegExcess: return "REG-EXCESS";
2431 case RegCritical: return "REG-CRIT ";
2432 case Stall: return "STALL ";
2433 case Cluster: return "CLUSTER ";
2434 case Weak: return "WEAK ";
2435 case RegMax: return "REG-MAX ";
2436 case ResourceReduce: return "RES-REDUCE";
2437 case ResourceDemand: return "RES-DEMAND";
2438 case TopDepthReduce: return "TOP-DEPTH ";
2439 case TopPathReduce: return "TOP-PATH ";
2440 case BotHeightReduce:return "BOT-HEIGHT";
2441 case BotPathReduce: return "BOT-PATH ";
2442 case NextDefUse: return "DEF-USE ";
2443 case NodeOrder: return "ORDER ";
2444 };
2445 llvm_unreachable("Unknown reason!");
2446}
2447
2448void GenericSchedulerBase::traceCandidate(const SchedCandidate &Cand) {
2449 PressureChange P;
2450 unsigned ResIdx = 0;
2451 unsigned Latency = 0;
2452 switch (Cand.Reason) {
2453 default:
2454 break;
2455 case RegExcess:
2456 P = Cand.RPDelta.Excess;
2457 break;
2458 case RegCritical:
2459 P = Cand.RPDelta.CriticalMax;
2460 break;
2461 case RegMax:
2462 P = Cand.RPDelta.CurrentMax;
2463 break;
2464 case ResourceReduce:
2465 ResIdx = Cand.Policy.ReduceResIdx;
2466 break;
2467 case ResourceDemand:
2468 ResIdx = Cand.Policy.DemandResIdx;
2469 break;
2470 case TopDepthReduce:
2471 Latency = Cand.SU->getDepth();
2472 break;
2473 case TopPathReduce:
2474 Latency = Cand.SU->getHeight();
2475 break;
2476 case BotHeightReduce:
2477 Latency = Cand.SU->getHeight();
2478 break;
2479 case BotPathReduce:
2480 Latency = Cand.SU->getDepth();
2481 break;
2482 }
James Y Knighte72b0db2015-09-18 18:52:20 +00002483 dbgs() << " Cand SU(" << Cand.SU->NodeNum << ") " << getReasonStr(Cand.Reason);
Andrew Trickd14d7c22013-12-28 21:56:57 +00002484 if (P.isValid())
2485 dbgs() << " " << TRI->getRegPressureSetName(P.getPSet())
2486 << ":" << P.getUnitInc() << " ";
2487 else
2488 dbgs() << " ";
2489 if (ResIdx)
2490 dbgs() << " " << SchedModel->getProcResource(ResIdx)->Name << " ";
2491 else
2492 dbgs() << " ";
2493 if (Latency)
2494 dbgs() << " " << Latency << " cycles ";
2495 else
2496 dbgs() << " ";
2497 dbgs() << '\n';
2498}
2499#endif
2500
2501/// Return true if this heuristic determines order.
2502static bool tryLess(int TryVal, int CandVal,
2503 GenericSchedulerBase::SchedCandidate &TryCand,
2504 GenericSchedulerBase::SchedCandidate &Cand,
2505 GenericSchedulerBase::CandReason Reason) {
2506 if (TryVal < CandVal) {
2507 TryCand.Reason = Reason;
2508 return true;
2509 }
2510 if (TryVal > CandVal) {
2511 if (Cand.Reason > Reason)
2512 Cand.Reason = Reason;
2513 return true;
2514 }
Andrew Trickd14d7c22013-12-28 21:56:57 +00002515 return false;
2516}
2517
2518static bool tryGreater(int TryVal, int CandVal,
2519 GenericSchedulerBase::SchedCandidate &TryCand,
2520 GenericSchedulerBase::SchedCandidate &Cand,
2521 GenericSchedulerBase::CandReason Reason) {
2522 if (TryVal > CandVal) {
2523 TryCand.Reason = Reason;
2524 return true;
2525 }
2526 if (TryVal < CandVal) {
2527 if (Cand.Reason > Reason)
2528 Cand.Reason = Reason;
2529 return true;
2530 }
Andrew Trickd14d7c22013-12-28 21:56:57 +00002531 return false;
2532}
2533
2534static bool tryLatency(GenericSchedulerBase::SchedCandidate &TryCand,
2535 GenericSchedulerBase::SchedCandidate &Cand,
2536 SchedBoundary &Zone) {
2537 if (Zone.isTop()) {
2538 if (Cand.SU->getDepth() > Zone.getScheduledLatency()) {
2539 if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2540 TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
2541 return true;
2542 }
2543 if (tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2544 TryCand, Cand, GenericSchedulerBase::TopPathReduce))
2545 return true;
Matthias Braunb550b762016-04-21 01:54:13 +00002546 } else {
Andrew Trickd14d7c22013-12-28 21:56:57 +00002547 if (Cand.SU->getHeight() > Zone.getScheduledLatency()) {
2548 if (tryLess(TryCand.SU->getHeight(), Cand.SU->getHeight(),
2549 TryCand, Cand, GenericSchedulerBase::BotHeightReduce))
2550 return true;
2551 }
2552 if (tryGreater(TryCand.SU->getDepth(), Cand.SU->getDepth(),
2553 TryCand, Cand, GenericSchedulerBase::BotPathReduce))
2554 return true;
2555 }
2556 return false;
2557}
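
// A minimal sketch of how the try* helpers compose (illustrative only; this
// hypothetical comparator is not used by the pass): each helper returns true
// as soon as it can order the pair, so callers chain them and fall through
// only when a heuristic ties.
static bool orderByDepthThenHeight(GenericSchedulerBase::SchedCandidate &TryCand,
                                   GenericSchedulerBase::SchedCandidate &Cand) {
  // Prefer the candidate with the smaller depth from the scheduled zone...
  if (tryLess(TryCand.SU->getDepth(), Cand.SU->getDepth(),
              TryCand, Cand, GenericSchedulerBase::TopDepthReduce))
    return true;
  // ...otherwise prefer the one heading the longer remaining critical path.
  return tryGreater(TryCand.SU->getHeight(), Cand.SU->getHeight(),
                    TryCand, Cand, GenericSchedulerBase::TopPathReduce);
}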
2558
Matthias Braun49cb6e92016-05-27 22:14:26 +00002559static void tracePick(GenericSchedulerBase::CandReason Reason, bool IsTop) {
2560 DEBUG(dbgs() << "Pick " << (IsTop ? "Top " : "Bot ")
2561 << GenericSchedulerBase::getReasonStr(Reason) << '\n');
2562}
2563
Matthias Braun6ad3d052016-06-25 00:23:00 +00002564static void tracePick(const GenericSchedulerBase::SchedCandidate &Cand) {
2565 tracePick(Cand.Reason, Cand.AtTop);
Andrew Trickd14d7c22013-12-28 21:56:57 +00002566}
2567
Andrew Trickfc127d12013-12-07 05:59:44 +00002568void GenericScheduler::initialize(ScheduleDAGMI *dag) {
Andrew Trickd7f890e2013-12-28 21:56:47 +00002569 assert(dag->hasVRegLiveness() &&
2570 "(PreRA)GenericScheduler needs vreg liveness");
2571 DAG = static_cast<ScheduleDAGMILive*>(dag);
Andrew Trickfc127d12013-12-07 05:59:44 +00002572 SchedModel = DAG->getSchedModel();
2573 TRI = DAG->TRI;
2574
2575 Rem.init(DAG, SchedModel);
2576 Top.init(DAG, SchedModel, &Rem);
2577 Bot.init(DAG, SchedModel, &Rem);
2578
2579 // Initialize resource counts.
2580
2581 // Initialize the HazardRecognizers. If itineraries don't exist, are empty, or
2582 // are disabled, then these HazardRecs will be disabled.
2583 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
Andrew Trickfc127d12013-12-07 05:59:44 +00002584 if (!Top.HazardRec) {
2585 Top.HazardRec =
Eric Christopher99556d72014-10-14 06:56:25 +00002586 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
Eric Christopherd9134482014-08-04 21:25:23 +00002587 Itin, DAG);
Andrew Trickfc127d12013-12-07 05:59:44 +00002588 }
2589 if (!Bot.HazardRec) {
2590 Bot.HazardRec =
Eric Christopher99556d72014-10-14 06:56:25 +00002591 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
Eric Christopherd9134482014-08-04 21:25:23 +00002592 Itin, DAG);
Andrew Trickfc127d12013-12-07 05:59:44 +00002593 }
Matthias Brauncc676c42016-06-25 02:03:36 +00002594 TopCand.SU = nullptr;
2595 BotCand.SU = nullptr;
Andrew Trickfc127d12013-12-07 05:59:44 +00002596}
2597
2598/// Initialize the per-region scheduling policy.
2599void GenericScheduler::initPolicy(MachineBasicBlock::iterator Begin,
2600 MachineBasicBlock::iterator End,
2601 unsigned NumRegionInstrs) {
Eric Christopher99556d72014-10-14 06:56:25 +00002602 const MachineFunction &MF = *Begin->getParent()->getParent();
2603 const TargetLowering *TLI = MF.getSubtarget().getTargetLowering();
Andrew Trickfc127d12013-12-07 05:59:44 +00002604
2605 // Avoid setting up the register pressure tracker for small regions to save
2606 // compile time. As a rough heuristic, only track pressure when the number of
2607 // schedulable instructions exceeds half the integer register file.
Andrew Trick350ff2c2014-01-21 21:27:37 +00002608 RegionPolicy.ShouldTrackPressure = true;
Andrew Trick46753512014-01-22 03:38:55 +00002609 for (unsigned VT = MVT::i32; VT > (unsigned)MVT::i1; --VT) {
2610 MVT::SimpleValueType LegalIntVT = (MVT::SimpleValueType)VT;
2611 if (TLI->isTypeLegal(LegalIntVT)) {
Andrew Trick350ff2c2014-01-21 21:27:37 +00002612 unsigned NIntRegs = Context->RegClassInfo->getNumAllocatableRegs(
Andrew Trick46753512014-01-22 03:38:55 +00002613 TLI->getRegClassFor(LegalIntVT));
Andrew Trick350ff2c2014-01-21 21:27:37 +00002614 RegionPolicy.ShouldTrackPressure = NumRegionInstrs > (NIntRegs / 2);
2615 }
2616 }
Andrew Trickfc127d12013-12-07 05:59:44 +00002617
2618  // For generic targets, we default to bottom-up, because it's simpler, and
2619 // compile-time optimizations have been implemented in that direction.
2620 RegionPolicy.OnlyBottomUp = true;
2621
2622 // Allow the subtarget to override default policy.
Duncan P. N. Exon Smith63298722016-07-01 00:23:27 +00002623 MF.getSubtarget().overrideSchedPolicy(RegionPolicy, NumRegionInstrs);
Andrew Trickfc127d12013-12-07 05:59:44 +00002624
2625 // After subtarget overrides, apply command line options.
2626 if (!EnableRegPressure)
2627 RegionPolicy.ShouldTrackPressure = false;
2628
2629 // Check -misched-topdown/bottomup can force or unforce scheduling direction.
2630 // e.g. -misched-bottomup=false allows scheduling in both directions.
2631 assert((!ForceTopDown || !ForceBottomUp) &&
2632 "-misched-topdown incompatible with -misched-bottomup");
2633 if (ForceBottomUp.getNumOccurrences() > 0) {
2634 RegionPolicy.OnlyBottomUp = ForceBottomUp;
2635 if (RegionPolicy.OnlyBottomUp)
2636 RegionPolicy.OnlyTopDown = false;
2637 }
2638 if (ForceTopDown.getNumOccurrences() > 0) {
2639 RegionPolicy.OnlyTopDown = ForceTopDown;
2640 if (RegionPolicy.OnlyTopDown)
2641 RegionPolicy.OnlyBottomUp = false;
2642 }
2643}
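
// A worked example of the pressure-tracking heuristic above (hypothetical
// target): with 32 allocatable registers for the largest legal integer type,
// NIntRegs / 2 = 16, so a 12-instruction region skips the pressure tracker
// while a 20-instruction region enables it, before the subtarget override
// and command-line options are applied.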
2644
James Y Knighte72b0db2015-09-18 18:52:20 +00002645void GenericScheduler::dumpPolicy() {
2646 dbgs() << "GenericScheduler RegionPolicy: "
2647 << " ShouldTrackPressure=" << RegionPolicy.ShouldTrackPressure
2648 << " OnlyTopDown=" << RegionPolicy.OnlyTopDown
2649 << " OnlyBottomUp=" << RegionPolicy.OnlyBottomUp
2650 << "\n";
2651}
2652
Andrew Trickfc127d12013-12-07 05:59:44 +00002653/// Set IsAcyclicLatencyLimited if the acyclic path is longer than the cyclic
2654/// critical path by more cycles than it takes to drain the instruction buffer.
2655/// We estimate an upper bound on in-flight instructions as:
2656///
2657/// CyclesPerIteration = max( CyclicPath, Loop-Resource-Height )
2658/// InFlightIterations = AcyclicPath / CyclesPerIteration
2659/// InFlightResources = InFlightIterations * LoopResources
2660///
2661/// TODO: Check execution resources in addition to IssueCount.
2662void GenericScheduler::checkAcyclicLatency() {
2663 if (Rem.CyclicCritPath == 0 || Rem.CyclicCritPath >= Rem.CriticalPath)
2664 return;
2665
2666 // Scaled number of cycles per loop iteration.
2667 unsigned IterCount =
2668 std::max(Rem.CyclicCritPath * SchedModel->getLatencyFactor(),
2669 Rem.RemIssueCount);
2670 // Scaled acyclic critical path.
2671 unsigned AcyclicCount = Rem.CriticalPath * SchedModel->getLatencyFactor();
2672 // InFlightCount = (AcyclicPath / IterCycles) * InstrPerLoop
2673 unsigned InFlightCount =
2674 (AcyclicCount * Rem.RemIssueCount + IterCount-1) / IterCount;
2675 unsigned BufferLimit =
2676 SchedModel->getMicroOpBufferSize() * SchedModel->getMicroOpFactor();
2677
2678 Rem.IsAcyclicLatencyLimited = InFlightCount > BufferLimit;
2679
2680 DEBUG(dbgs() << "IssueCycles="
2681 << Rem.RemIssueCount / SchedModel->getLatencyFactor() << "c "
2682 << "IterCycles=" << IterCount / SchedModel->getLatencyFactor()
2683 << "c NumIters=" << (AcyclicCount + IterCount-1) / IterCount
2684 << " InFlight=" << InFlightCount / SchedModel->getMicroOpFactor()
2685 << "m BufferLim=" << SchedModel->getMicroOpBufferSize() << "m\n";
2686 if (Rem.IsAcyclicLatencyLimited)
2687 dbgs() << " ACYCLIC LATENCY LIMIT\n");
2688}
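
// A worked example of the estimate above (hypothetical numbers, with both
// the latency and micro-op factors equal to 1): for a loop with
// CyclicCritPath = 10, CriticalPath = 30, and RemIssueCount = 40 micro-ops,
// IterCount = max(10, 40) = 40 and InFlightCount = ceil(30 * 40 / 40) = 30.
// With a MicroOpBufferSize of 16, 30 > 16, so the loop is flagged as
// acyclic-latency limited.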
2689
2690void GenericScheduler::registerRoots() {
2691 Rem.CriticalPath = DAG->ExitSU.getDepth();
2692
2693  // Some roots may not feed into ExitSU. Check all of them just in case.
2694 for (std::vector<SUnit*>::const_iterator
2695 I = Bot.Available.begin(), E = Bot.Available.end(); I != E; ++I) {
2696 if ((*I)->getDepth() > Rem.CriticalPath)
2697 Rem.CriticalPath = (*I)->getDepth();
2698 }
Gerolf Hoflehnerb5220dc2014-08-07 21:49:44 +00002699 DEBUG(dbgs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << '\n');
2700 if (DumpCriticalPathLength) {
2701 errs() << "Critical Path(GS-RR ): " << Rem.CriticalPath << " \n";
2702 }
Andrew Trickfc127d12013-12-07 05:59:44 +00002703
2704 if (EnableCyclicPath) {
2705 Rem.CyclicCritPath = DAG->computeCyclicCriticalPath();
2706 checkAcyclicLatency();
2707 }
2708}
2709
Andrew Trick1a831342013-08-30 03:49:48 +00002710static bool tryPressure(const PressureChange &TryP,
2711 const PressureChange &CandP,
Andrew Trickd14d7c22013-12-28 21:56:57 +00002712 GenericSchedulerBase::SchedCandidate &TryCand,
2713 GenericSchedulerBase::SchedCandidate &Cand,
Tom Stellard5ce53062015-12-16 18:31:01 +00002714 GenericSchedulerBase::CandReason Reason,
2715 const TargetRegisterInfo *TRI,
2716 const MachineFunction &MF) {
Andrew Trickb1a45b62013-08-30 04:27:29 +00002717 // If one candidate decreases and the other increases, go with it.
2718 // Invalid candidates have UnitInc==0.
Hal Finkel7a87f8a2014-10-10 17:06:20 +00002719 if (tryGreater(TryP.getUnitInc() < 0, CandP.getUnitInc() < 0, TryCand, Cand,
2720 Reason)) {
Andrew Trickb1a45b62013-08-30 04:27:29 +00002721 return true;
2722 }
Matthias Braun6ad3d052016-06-25 00:23:00 +00002723 // Do not compare the magnitude of pressure changes between top and bottom
2724 // boundary.
2725 if (Cand.AtTop != TryCand.AtTop)
2726 return false;
2727
2728 // If both candidates affect the same set in the same boundary, go with the
2729 // smallest increase.
2730 unsigned TryPSet = TryP.getPSetOrMax();
2731 unsigned CandPSet = CandP.getPSetOrMax();
2732 if (TryPSet == CandPSet) {
2733 return tryLess(TryP.getUnitInc(), CandP.getUnitInc(), TryCand, Cand,
2734 Reason);
2735 }
Tom Stellard5ce53062015-12-16 18:31:01 +00002736
2737 int TryRank = TryP.isValid() ? TRI->getRegPressureSetScore(MF, TryPSet) :
2738 std::numeric_limits<int>::max();
2739
2740 int CandRank = CandP.isValid() ? TRI->getRegPressureSetScore(MF, CandPSet) :
2741 std::numeric_limits<int>::max();
2742
Andrew Trick401b6952013-07-25 07:26:35 +00002743 // If the candidates are decreasing pressure, reverse priority.
Andrew Trick1a831342013-08-30 03:49:48 +00002744 if (TryP.getUnitInc() < 0)
Andrew Trick401b6952013-07-25 07:26:35 +00002745 std::swap(TryRank, CandRank);
2746 return tryGreater(TryRank, CandRank, TryCand, Cand, Reason);
2747}
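
// For instance (hypothetical deltas): a candidate decreasing a set by 2
// units beats one increasing any set by 1 via the sign test above; only
// when both candidates touch the same pressure set on the same boundary
// does the magnitude comparison (smallest increase wins) apply, and
// otherwise the target-provided set scores order the two different sets.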
2748
Andrew Tricka7714a02012-11-12 19:40:10 +00002749static unsigned getWeakLeft(const SUnit *SU, bool isTop) {
2750 return (isTop) ? SU->WeakPredsLeft : SU->WeakSuccsLeft;
2751}
2752
Andrew Tricke833e1c2013-04-13 06:07:40 +00002753/// Minimize physical register live ranges. Regalloc wants them adjacent to
2754/// their physreg def/use.
2755///
2756/// FIXME: This is an unnecessary check on the critical path. Most are root/leaf
2757/// copies which can be prescheduled. The rest (e.g. x86 MUL) could be bundled
2758/// with the operation that produces or consumes the physreg. We'll do this when
2759/// regalloc has support for parallel copies.
2760static int biasPhysRegCopy(const SUnit *SU, bool isTop) {
2761 const MachineInstr *MI = SU->getInstr();
2762 if (!MI->isCopy())
2763 return 0;
2764
2765 unsigned ScheduledOper = isTop ? 1 : 0;
2766 unsigned UnscheduledOper = isTop ? 0 : 1;
2767 // If we have already scheduled the physreg produce/consumer, immediately
2768 // schedule the copy.
2769 if (TargetRegisterInfo::isPhysicalRegister(
2770 MI->getOperand(ScheduledOper).getReg()))
2771 return 1;
2772 // If the physreg is at the boundary, defer it. Otherwise schedule it
2773 // immediately to free the dependent. We can hoist the copy later.
2774 bool AtBoundary = isTop ? !SU->NumSuccsLeft : !SU->NumPredsLeft;
2775 if (TargetRegisterInfo::isPhysicalRegister(
2776 MI->getOperand(UnscheduledOper).getReg()))
2777 return AtBoundary ? -1 : 1;
2778 return 0;
2779}
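
// For example: when %x = COPY %physreg is scheduled top-down, operand 1
// (the physreg source) lies on the already-scheduled side, so the helper
// returns 1 and the copy is picked immediately after its producer; if the
// physreg were instead the unscheduled operand, the copy would be deferred
// (-1) while it sits at the region boundary.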
2780
Matthias Braun4f573772016-04-22 19:10:15 +00002781void GenericScheduler::initCandidate(SchedCandidate &Cand, SUnit *SU,
2782 bool AtTop,
2783 const RegPressureTracker &RPTracker,
2784 RegPressureTracker &TempTracker) {
2785 Cand.SU = SU;
Matthias Braun6ad3d052016-06-25 00:23:00 +00002786 Cand.AtTop = AtTop;
Matthias Braun4f573772016-04-22 19:10:15 +00002787 if (DAG->isTrackingPressure()) {
2788 if (AtTop) {
2789 TempTracker.getMaxDownwardPressureDelta(
2790 Cand.SU->getInstr(),
2791 Cand.RPDelta,
2792 DAG->getRegionCriticalPSets(),
2793 DAG->getRegPressure().MaxSetPressure);
2794 } else {
2795 if (VerifyScheduling) {
2796 TempTracker.getMaxUpwardPressureDelta(
2797 Cand.SU->getInstr(),
2798 &DAG->getPressureDiff(Cand.SU),
2799 Cand.RPDelta,
2800 DAG->getRegionCriticalPSets(),
2801 DAG->getRegPressure().MaxSetPressure);
2802 } else {
2803 RPTracker.getUpwardPressureDelta(
2804 Cand.SU->getInstr(),
2805 DAG->getPressureDiff(Cand.SU),
2806 Cand.RPDelta,
2807 DAG->getRegionCriticalPSets(),
2808 DAG->getRegPressure().MaxSetPressure);
2809 }
2810 }
2811 }
2812 DEBUG(if (Cand.RPDelta.Excess.isValid())
2813 dbgs() << " Try SU(" << Cand.SU->NodeNum << ") "
2814 << TRI->getRegPressureSetName(Cand.RPDelta.Excess.getPSet())
2815 << ":" << Cand.RPDelta.Excess.getUnitInc() << "\n");
2816}
2817
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002818/// Apply a set of heuristics to a new candidate. Heuristics are currently
2819/// hierarchical. This may be more efficient than a graduated cost model because
2820/// we don't need to evaluate all aspects of the model for each node in the
2821/// queue. But it's really done to make the heuristics easier to debug and
2822/// statistically analyze.
2823///
2824/// \param Cand provides the policy and current best candidate.
2825/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
Matthias Braun6ad3d052016-06-25 00:23:00 +00002826/// \param Zone describes the scheduled zone that we are extending, or nullptr
2827/// if Cand is from a different zone than TryCand.
Andrew Trick665d3ec2013-09-19 23:10:59 +00002828void GenericScheduler::tryCandidate(SchedCandidate &Cand,
Andrew Trickbb1247b2013-12-05 17:55:47 +00002829 SchedCandidate &TryCand,
Matthias Braun6ad3d052016-06-25 00:23:00 +00002830 SchedBoundary *Zone) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002831 // Initialize the candidate if needed.
2832 if (!Cand.isValid()) {
2833 TryCand.Reason = NodeOrder;
2834 return;
2835 }
Andrew Tricke833e1c2013-04-13 06:07:40 +00002836
Matthias Braun6ad3d052016-06-25 00:23:00 +00002837 if (tryGreater(biasPhysRegCopy(TryCand.SU, TryCand.AtTop),
2838 biasPhysRegCopy(Cand.SU, Cand.AtTop),
Andrew Tricke833e1c2013-04-13 06:07:40 +00002839 TryCand, Cand, PhysRegCopy))
2840 return;
2841
Andrew Tricke02d5da2015-05-17 23:40:27 +00002842 // Avoid exceeding the target's limit.
Andrew Trick66c3dfb2013-09-04 21:00:11 +00002843 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.Excess,
2844 Cand.RPDelta.Excess,
Tom Stellard5ce53062015-12-16 18:31:01 +00002845 TryCand, Cand, RegExcess, TRI,
2846 DAG->MF))
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002847 return;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002848
2849 // Avoid increasing the max critical pressure in the scheduled region.
Andrew Trick66c3dfb2013-09-04 21:00:11 +00002850 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CriticalMax,
2851 Cand.RPDelta.CriticalMax,
Tom Stellard5ce53062015-12-16 18:31:01 +00002852 TryCand, Cand, RegCritical, TRI,
2853 DAG->MF))
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002854 return;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002855
Matthias Braun6ad3d052016-06-25 00:23:00 +00002856 // We only compare a subset of features when comparing nodes between
2857  // Top and Bottom boundary. Some properties are simply incomparable; in many
2858  // other instances we should only override the other boundary if something
2859  // is a clearly good pick on one boundary. Skip heuristics that are more
2860 // "tie-breaking" in nature.
2861 bool SameBoundary = Zone != nullptr;
2862 if (SameBoundary) {
2863 // For loops that are acyclic path limited, aggressively schedule for
2864 // latency. This can result in very long dependence chains scheduled in
2865 // sequence, so once every cycle (when CurrMOps == 0), switch to normal
2866 // heuristics.
2867 if (Rem.IsAcyclicLatencyLimited && !Zone->getCurrMOps() &&
2868 tryLatency(TryCand, Cand, *Zone))
2869 return;
Andrew Trickddffae92013-09-06 17:32:36 +00002870
Matthias Braun6ad3d052016-06-25 00:23:00 +00002871 // Prioritize instructions that read unbuffered resources by stall cycles.
2872 if (tryLess(Zone->getLatencyStallCycles(TryCand.SU),
2873 Zone->getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
2874 return;
2875 }
Andrew Trick880e5732013-12-05 17:55:58 +00002876
Andrew Tricka7714a02012-11-12 19:40:10 +00002877 // Keep clustered nodes together to encourage downstream peephole
2878 // optimizations which may reduce resource requirements.
2879 //
2880 // This is a best effort to set things up for a post-RA pass. Optimizations
2881 // like generating loads of multiple registers should ideally be done within
2882 // the scheduler pass by combining the loads during DAG postprocessing.
Matthias Braun6ad3d052016-06-25 00:23:00 +00002883 const SUnit *CandNextClusterSU =
2884 Cand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2885 const SUnit *TryCandNextClusterSU =
2886 TryCand.AtTop ? DAG->getNextClusterSucc() : DAG->getNextClusterPred();
2887 if (tryGreater(TryCand.SU == TryCandNextClusterSU,
2888 Cand.SU == CandNextClusterSU,
Andrew Tricka7714a02012-11-12 19:40:10 +00002889 TryCand, Cand, Cluster))
2890 return;
Andrew Trick85a1d4c2013-04-24 15:54:43 +00002891
Matthias Braun6ad3d052016-06-25 00:23:00 +00002892 if (SameBoundary) {
2893 // Weak edges are for clustering and other constraints.
2894 if (tryLess(getWeakLeft(TryCand.SU, TryCand.AtTop),
2895 getWeakLeft(Cand.SU, Cand.AtTop),
2896 TryCand, Cand, Weak))
2897 return;
Andrew Tricka7714a02012-11-12 19:40:10 +00002898 }
Matthias Braun6ad3d052016-06-25 00:23:00 +00002899
Andrew Trick71f08a32013-06-17 21:45:13 +00002900 // Avoid increasing the max pressure of the entire region.
Andrew Trick66c3dfb2013-09-04 21:00:11 +00002901 if (DAG->isTrackingPressure() && tryPressure(TryCand.RPDelta.CurrentMax,
2902 Cand.RPDelta.CurrentMax,
Tom Stellard5ce53062015-12-16 18:31:01 +00002903 TryCand, Cand, RegMax, TRI,
2904 DAG->MF))
Andrew Trick71f08a32013-06-17 21:45:13 +00002905 return;
2906
Matthias Braun6ad3d052016-06-25 00:23:00 +00002907 if (SameBoundary) {
2908 // Avoid critical resource consumption and balance the schedule.
2909 TryCand.initResourceDelta(DAG, SchedModel);
2910 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
2911 TryCand, Cand, ResourceReduce))
2912 return;
2913 if (tryGreater(TryCand.ResDelta.DemandedResources,
2914 Cand.ResDelta.DemandedResources,
2915 TryCand, Cand, ResourceDemand))
2916 return;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002917
Matthias Braun6ad3d052016-06-25 00:23:00 +00002918 // Avoid serializing long latency dependence chains.
2919 // For acyclic path limited loops, latency was already checked above.
2920 if (!RegionPolicy.DisableLatencyHeuristic && TryCand.Policy.ReduceLatency &&
2921 !Rem.IsAcyclicLatencyLimited && tryLatency(TryCand, Cand, *Zone))
2922 return;
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002923
Matthias Braun6ad3d052016-06-25 00:23:00 +00002924 // Prefer immediate defs/users of the last scheduled instruction. This is a
2925 // local pressure avoidance strategy that also makes the machine code
2926 // readable.
2927 if (tryGreater(Zone->isNextSU(TryCand.SU), Zone->isNextSU(Cand.SU),
2928 TryCand, Cand, NextDefUse))
2929 return;
Andrew Tricka7714a02012-11-12 19:40:10 +00002930
Matthias Braun6ad3d052016-06-25 00:23:00 +00002931 // Fall through to original instruction order.
2932 if ((Zone->isTop() && TryCand.SU->NodeNum < Cand.SU->NodeNum)
2933 || (!Zone->isTop() && TryCand.SU->NodeNum > Cand.SU->NodeNum)) {
2934 TryCand.Reason = NodeOrder;
2935 }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002936 }
2937}
Andrew Trick419eae22012-05-10 21:06:19 +00002938
Andrew Trickc573cd92013-09-06 17:32:44 +00002939/// Pick the best candidate from the queue.
Andrew Trick7ee9de52012-05-10 21:06:16 +00002940///
2941/// TODO: getMaxPressureDelta results can be mostly cached for each SUnit during
2942/// DAG building. To adjust for the current scheduling location we need to
2943/// maintain the number of vreg uses remaining to be top-scheduled.
Andrew Trick665d3ec2013-09-19 23:10:59 +00002944void GenericScheduler::pickNodeFromQueue(SchedBoundary &Zone,
Matthias Braun6ad3d052016-06-25 00:23:00 +00002945 const CandPolicy &ZonePolicy,
Andrew Trickbb1247b2013-12-05 17:55:47 +00002946 const RegPressureTracker &RPTracker,
2947 SchedCandidate &Cand) {
Andrew Trick7ee9de52012-05-10 21:06:16 +00002948 // getMaxPressureDelta temporarily modifies the tracker.
2949 RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);
2950
Matthias Braund29d31e2016-06-23 21:27:38 +00002951 ReadyQueue &Q = Zone.Available;
Andrew Trickdd375dd2012-05-24 22:11:03 +00002952 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
Andrew Trick7ee9de52012-05-10 21:06:16 +00002953
Matthias Braun6ad3d052016-06-25 00:23:00 +00002954 SchedCandidate TryCand(ZonePolicy);
Matthias Braun4f573772016-04-22 19:10:15 +00002955 initCandidate(TryCand, *I, Zone.isTop(), RPTracker, TempTracker);
Matthias Braun6ad3d052016-06-25 00:23:00 +00002956 // Pass SchedBoundary only when comparing nodes from the same boundary.
2957 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
2958 tryCandidate(Cand, TryCand, ZoneArg);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002959 if (TryCand.Reason != NoCand) {
2960 // Initialize resource delta if needed in case future heuristics query it.
2961 if (TryCand.ResDelta == SchedResourceDelta())
2962 TryCand.initResourceDelta(DAG, SchedModel);
2963 Cand.setBest(TryCand);
Andrew Trick419d4912013-04-05 00:31:29 +00002964 DEBUG(traceCandidate(Cand));
Andrew Trick22025772012-05-17 18:35:10 +00002965 }
Andrew Trick7ee9de52012-05-10 21:06:16 +00002966 }
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002967}
2968
Andrew Trick22025772012-05-17 18:35:10 +00002969/// Pick the best candidate node from either the top or bottom queue.
Andrew Trick665d3ec2013-09-19 23:10:59 +00002970SUnit *GenericScheduler::pickNodeBidirectional(bool &IsTopNode) {
Andrew Trick22025772012-05-17 18:35:10 +00002971 // Schedule as far as possible in the direction of no choice. This is most
2972 // efficient, but also provides the best heuristics for CriticalPSets.
Andrew Trick61f1a272012-05-24 22:11:09 +00002973 if (SUnit *SU = Bot.pickOnlyChoice()) {
Andrew Trick22025772012-05-17 18:35:10 +00002974 IsTopNode = false;
Matthias Braun49cb6e92016-05-27 22:14:26 +00002975 tracePick(Only1, false);
Andrew Trick61f1a272012-05-24 22:11:09 +00002976 return SU;
Andrew Trick22025772012-05-17 18:35:10 +00002977 }
Andrew Trick61f1a272012-05-24 22:11:09 +00002978 if (SUnit *SU = Top.pickOnlyChoice()) {
Andrew Trick22025772012-05-17 18:35:10 +00002979 IsTopNode = true;
Matthias Braun49cb6e92016-05-27 22:14:26 +00002980 tracePick(Only1, true);
Andrew Trick61f1a272012-05-24 22:11:09 +00002981 return SU;
Andrew Trick22025772012-05-17 18:35:10 +00002982 }
Andrew Trickfc127d12013-12-07 05:59:44 +00002983 // Set the bottom-up policy based on the state of the current bottom zone and
2984 // the instructions outside the zone, including the top zone.
Matthias Braun6ad3d052016-06-25 00:23:00 +00002985 CandPolicy BotPolicy;
2986 setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
Andrew Trickfc127d12013-12-07 05:59:44 +00002987 // Set the top-down policy based on the state of the current top zone and
2988 // the instructions outside the zone, including the bottom zone.
Matthias Braun6ad3d052016-06-25 00:23:00 +00002989 CandPolicy TopPolicy;
2990 setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);
Andrew Trick3ca33ac2012-11-07 07:05:09 +00002991
Matthias Brauncc676c42016-06-25 02:03:36 +00002992 // See if BotCand is still valid (because we previously scheduled from Top).
Matthias Braund29d31e2016-06-23 21:27:38 +00002993 DEBUG(dbgs() << "Picking from Bot:\n");
Matthias Brauncc676c42016-06-25 02:03:36 +00002994 if (!BotCand.isValid() || BotCand.SU->isScheduled ||
2995 BotCand.Policy != BotPolicy) {
2996 BotCand.reset(CandPolicy());
2997 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
2998 assert(BotCand.Reason != NoCand && "failed to find the first candidate");
2999 } else {
3000 DEBUG(traceCandidate(BotCand));
3001#ifndef NDEBUG
3002 if (VerifyScheduling) {
3003 SchedCandidate TCand;
3004 TCand.reset(CandPolicy());
3005 pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), TCand);
3006 assert(TCand.SU == BotCand.SU &&
3007 "Last pick result should correspond to re-picking right now");
3008 }
3009#endif
3010 }
Andrew Trick22025772012-05-17 18:35:10 +00003011
Andrew Trick22025772012-05-17 18:35:10 +00003012 // Check if the top Q has a better candidate.
Matthias Braund29d31e2016-06-23 21:27:38 +00003013 DEBUG(dbgs() << "Picking from Top:\n");
Matthias Brauncc676c42016-06-25 02:03:36 +00003014 if (!TopCand.isValid() || TopCand.SU->isScheduled ||
3015 TopCand.Policy != TopPolicy) {
3016 TopCand.reset(CandPolicy());
3017 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
3018 assert(TopCand.Reason != NoCand && "failed to find the first candidate");
3019 } else {
3020 DEBUG(traceCandidate(TopCand));
3021#ifndef NDEBUG
3022 if (VerifyScheduling) {
3023 SchedCandidate TCand;
3024 TCand.reset(CandPolicy());
3025 pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TCand);
3026 assert(TCand.SU == TopCand.SU &&
3027 "Last pick result should correspond to re-picking right now");
3028 }
3029#endif
3030 }
3031
3032 // Pick best from BotCand and TopCand.
3033 assert(BotCand.isValid());
3034 assert(TopCand.isValid());
3035 SchedCandidate Cand = BotCand;
3036 TopCand.Reason = NoCand;
3037 tryCandidate(Cand, TopCand, nullptr);
3038 if (TopCand.Reason != NoCand) {
3039 Cand.setBest(TopCand);
3040 DEBUG(traceCandidate(Cand));
3041 }
Andrew Trick22025772012-05-17 18:35:10 +00003042
Matthias Braun6ad3d052016-06-25 00:23:00 +00003043 IsTopNode = Cand.AtTop;
3044 tracePick(Cand);
3045 return Cand.SU;
Andrew Trick22025772012-05-17 18:35:10 +00003046}
3047
3048/// Pick the best node to balance the schedule. Implements MachineSchedStrategy.
Andrew Trick665d3ec2013-09-19 23:10:59 +00003049SUnit *GenericScheduler::pickNode(bool &IsTopNode) {
Andrew Trick7ee9de52012-05-10 21:06:16 +00003050 if (DAG->top() == DAG->bottom()) {
Andrew Trick61f1a272012-05-24 22:11:09 +00003051 assert(Top.Available.empty() && Top.Pending.empty() &&
3052 Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
Craig Topperc0196b12014-04-14 00:51:57 +00003053 return nullptr;
Andrew Trick7ee9de52012-05-10 21:06:16 +00003054 }
Andrew Trick7ee9de52012-05-10 21:06:16 +00003055 SUnit *SU;
Andrew Trick984d98b2012-10-08 18:53:53 +00003056 do {
Andrew Trick75e411c2013-09-06 17:32:34 +00003057 if (RegionPolicy.OnlyTopDown) {
Andrew Trick984d98b2012-10-08 18:53:53 +00003058 SU = Top.pickOnlyChoice();
3059 if (!SU) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00003060 CandPolicy NoPolicy;
Matthias Brauncc676c42016-06-25 02:03:36 +00003061 TopCand.reset(NoPolicy);
Matthias Braun6ad3d052016-06-25 00:23:00 +00003062 pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
Andrew Trick1ab16d92013-09-04 21:00:13 +00003063 assert(TopCand.Reason != NoCand && "failed to find a candidate");
Matthias Braun6ad3d052016-06-25 00:23:00 +00003064 tracePick(TopCand);
Andrew Trick984d98b2012-10-08 18:53:53 +00003065 SU = TopCand.SU;
3066 }
3067 IsTopNode = true;
Matthias Braunb550b762016-04-21 01:54:13 +00003068 } else if (RegionPolicy.OnlyBottomUp) {
Andrew Trick984d98b2012-10-08 18:53:53 +00003069 SU = Bot.pickOnlyChoice();
3070 if (!SU) {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00003071 CandPolicy NoPolicy;
Matthias Brauncc676c42016-06-25 02:03:36 +00003072 BotCand.reset(NoPolicy);
Matthias Braun6ad3d052016-06-25 00:23:00 +00003073 pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
Andrew Trick1ab16d92013-09-04 21:00:13 +00003074 assert(BotCand.Reason != NoCand && "failed to find a candidate");
Matthias Braun6ad3d052016-06-25 00:23:00 +00003075 tracePick(BotCand);
Andrew Trick984d98b2012-10-08 18:53:53 +00003076 SU = BotCand.SU;
3077 }
3078 IsTopNode = false;
Matthias Braunb550b762016-04-21 01:54:13 +00003079 } else {
Andrew Trick3ca33ac2012-11-07 07:05:09 +00003080 SU = pickNodeBidirectional(IsTopNode);
Andrew Trick984d98b2012-10-08 18:53:53 +00003081 }
3082 } while (SU->isScheduled);
3083
Andrew Trick61f1a272012-05-24 22:11:09 +00003084 if (SU->isTopReady())
3085 Top.removeReady(SU);
3086 if (SU->isBottomReady())
3087 Bot.removeReady(SU);
Andrew Trick4e7f6a72012-05-25 02:02:39 +00003088
Andrew Trick1f0bb692013-04-13 06:07:49 +00003089 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
Andrew Trick7ee9de52012-05-10 21:06:16 +00003090 return SU;
3091}
3092
Andrew Trick665d3ec2013-09-19 23:10:59 +00003093void GenericScheduler::reschedulePhysRegCopies(SUnit *SU, bool isTop) {
Andrew Tricke833e1c2013-04-13 06:07:40 +00003094
3095 MachineBasicBlock::iterator InsertPos = SU->getInstr();
3096 if (!isTop)
3097 ++InsertPos;
3098 SmallVectorImpl<SDep> &Deps = isTop ? SU->Preds : SU->Succs;
3099
3100 // Find already scheduled copies with a single physreg dependence and move
3101 // them just above the scheduled instruction.
3102 for (SmallVectorImpl<SDep>::iterator I = Deps.begin(), E = Deps.end();
3103 I != E; ++I) {
3104 if (I->getKind() != SDep::Data || !TRI->isPhysicalRegister(I->getReg()))
3105 continue;
3106 SUnit *DepSU = I->getSUnit();
3107 if (isTop ? DepSU->Succs.size() > 1 : DepSU->Preds.size() > 1)
3108 continue;
3109 MachineInstr *Copy = DepSU->getInstr();
3110 if (!Copy->isCopy())
3111 continue;
3112 DEBUG(dbgs() << " Rescheduling physreg copy ";
3113 I->getSUnit()->dump(DAG));
3114 DAG->moveInstruction(Copy, InsertPos);
3115 }
3116}
3117
Andrew Trick61f1a272012-05-24 22:11:09 +00003118/// Update the scheduler's state after scheduling a node. This is the same node
Andrew Trickd14d7c22013-12-28 21:56:57 +00003119/// that was just returned by pickNode(). However, ScheduleDAGMILive needs to
3120/// update its state based on the current cycle before MachineSchedStrategy
3121/// does.
Andrew Tricke833e1c2013-04-13 06:07:40 +00003122///
3123/// FIXME: Eventually, we may bundle physreg copies rather than rescheduling
3124/// them here. See comments in biasPhysRegCopy.
Andrew Trick665d3ec2013-09-19 23:10:59 +00003125void GenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
Andrew Trick45446062012-06-05 21:11:27 +00003126 if (IsTopNode) {
Andrew Trickfc127d12013-12-07 05:59:44 +00003127 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
Andrew Trickce27bb92012-06-29 03:23:22 +00003128 Top.bumpNode(SU);
Andrew Tricke833e1c2013-04-13 06:07:40 +00003129 if (SU->hasPhysRegUses)
3130 reschedulePhysRegCopies(SU, true);
Matthias Braunb550b762016-04-21 01:54:13 +00003131 } else {
Andrew Trickfc127d12013-12-07 05:59:44 +00003132 SU->BotReadyCycle = std::max(SU->BotReadyCycle, Bot.getCurrCycle());
Andrew Trickce27bb92012-06-29 03:23:22 +00003133 Bot.bumpNode(SU);
Andrew Tricke833e1c2013-04-13 06:07:40 +00003134 if (SU->hasPhysRegDefs)
3135 reschedulePhysRegCopies(SU, false);
Andrew Trick61f1a272012-05-24 22:11:09 +00003136 }
3137}
3138
Andrew Trick8823dec2012-03-14 04:00:41 +00003139/// Create the standard converging machine scheduler. This will be used as the
3140/// default scheduler if the target does not set a default.
Andrew Trickd14d7c22013-12-28 21:56:57 +00003141static ScheduleDAGInstrs *createGenericSchedLive(MachineSchedContext *C) {
David Blaikie422b93d2014-04-21 20:32:32 +00003142 ScheduleDAGMILive *DAG = new ScheduleDAGMILive(C, make_unique<GenericScheduler>(C));
Andrew Tricka7714a02012-11-12 19:40:10 +00003143 // Register DAG post-processors.
Andrew Trick85a1d4c2013-04-24 15:54:43 +00003144 //
3145 // FIXME: extend the mutation API to allow earlier mutations to instantiate
3146 // data and pass it to later mutations. Have a single mutation that gathers
3147 // the interesting nodes in one pass.
Tom Stellard68726a52016-08-19 19:59:18 +00003148 DAG->addMutation(createCopyConstrainDAGMutation(DAG->TII, DAG->TRI));
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00003149 if (EnableMemOpCluster) {
3150 if (DAG->TII->enableClusterLoads())
Tom Stellard68726a52016-08-19 19:59:18 +00003151 DAG->addMutation(createLoadClusterDAGMutation(DAG->TII, DAG->TRI));
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00003152 if (DAG->TII->enableClusterStores())
Tom Stellard68726a52016-08-19 19:59:18 +00003153 DAG->addMutation(createStoreClusterDAGMutation(DAG->TII, DAG->TRI));
Jun Bum Lim4c5bd582016-04-15 14:58:38 +00003154 }
Andrew Trick263280242012-11-12 19:52:20 +00003155 if (EnableMacroFusion)
Tom Stellard68726a52016-08-19 19:59:18 +00003156 DAG->addMutation(createMacroFusionDAGMutation(DAG->TII, DAG->TRI));
Andrew Tricka7714a02012-11-12 19:40:10 +00003157 return DAG;
Andrew Tricke1c034f2012-01-17 06:55:03 +00003158}
Andrew Trickd14d7c22013-12-28 21:56:57 +00003159
Andrew Tricke1c034f2012-01-17 06:55:03 +00003160static MachineSchedRegistry
Andrew Trick665d3ec2013-09-19 23:10:59 +00003161GenericSchedRegistry("converge", "Standard converging scheduler.",
Andrew Trickd14d7c22013-12-28 21:56:57 +00003162 createGenericSchedLive);
3163
3164//===----------------------------------------------------------------------===//
3165// PostGenericScheduler - Generic PostRA implementation of MachineSchedStrategy.
3166//===----------------------------------------------------------------------===//
3167
Andrew Trick3ccf71d2014-06-04 07:06:18 +00003168void PostGenericScheduler::initialize(ScheduleDAGMI *Dag) {
3169 DAG = Dag;
3170 SchedModel = DAG->getSchedModel();
3171 TRI = DAG->TRI;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003172
Andrew Trick3ccf71d2014-06-04 07:06:18 +00003173 Rem.init(DAG, SchedModel);
3174 Top.init(DAG, SchedModel, &Rem);
3175 BotRoots.clear();
Andrew Trickd14d7c22013-12-28 21:56:57 +00003176
Andrew Trick3ccf71d2014-06-04 07:06:18 +00003177 // Initialize the HazardRecognizers. If itineraries don't exist, are empty,
3178 // or are disabled, then these HazardRecs will be disabled.
3179 const InstrItineraryData *Itin = SchedModel->getInstrItineraries();
Andrew Trick3ccf71d2014-06-04 07:06:18 +00003180 if (!Top.HazardRec) {
3181 Top.HazardRec =
Eric Christopher99556d72014-10-14 06:56:25 +00003182 DAG->MF.getSubtarget().getInstrInfo()->CreateTargetMIHazardRecognizer(
Eric Christopherd9134482014-08-04 21:25:23 +00003183 Itin, DAG);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003184 }
Andrew Trick3ccf71d2014-06-04 07:06:18 +00003185}
Andrew Trickd14d7c22013-12-28 21:56:57 +00003186
Andrew Trickd14d7c22013-12-28 21:56:57 +00003187
3188void PostGenericScheduler::registerRoots() {
3189 Rem.CriticalPath = DAG->ExitSU.getDepth();
3190
3191  // Some roots may not feed into ExitSU. Check all of them just in case.
3192 for (SmallVectorImpl<SUnit*>::const_iterator
3193 I = BotRoots.begin(), E = BotRoots.end(); I != E; ++I) {
3194 if ((*I)->getDepth() > Rem.CriticalPath)
3195 Rem.CriticalPath = (*I)->getDepth();
3196 }
Gerolf Hoflehnerb5220dc2014-08-07 21:49:44 +00003197 DEBUG(dbgs() << "Critical Path: (PGS-RR) " << Rem.CriticalPath << '\n');
3198 if (DumpCriticalPathLength) {
3199 errs() << "Critical Path(PGS-RR ): " << Rem.CriticalPath << " \n";
3200 }
Andrew Trickd14d7c22013-12-28 21:56:57 +00003201}
3202
3203/// Apply a set of heuristics to a new candidate for PostRA scheduling.
3204///
3205/// \param Cand provides the policy and current best candidate.
3206/// \param TryCand refers to the next SUnit candidate, otherwise uninitialized.
3207void PostGenericScheduler::tryCandidate(SchedCandidate &Cand,
3208 SchedCandidate &TryCand) {
3209
3210 // Initialize the candidate if needed.
3211 if (!Cand.isValid()) {
3212 TryCand.Reason = NodeOrder;
3213 return;
3214 }
3215
3216 // Prioritize instructions that read unbuffered resources by stall cycles.
3217 if (tryLess(Top.getLatencyStallCycles(TryCand.SU),
3218 Top.getLatencyStallCycles(Cand.SU), TryCand, Cand, Stall))
3219 return;
3220
3221 // Avoid critical resource consumption and balance the schedule.
3222 if (tryLess(TryCand.ResDelta.CritResources, Cand.ResDelta.CritResources,
3223 TryCand, Cand, ResourceReduce))
3224 return;
3225 if (tryGreater(TryCand.ResDelta.DemandedResources,
3226 Cand.ResDelta.DemandedResources,
3227 TryCand, Cand, ResourceDemand))
3228 return;
3229
3230 // Avoid serializing long latency dependence chains.
3231 if (Cand.Policy.ReduceLatency && tryLatency(TryCand, Cand, Top)) {
3232 return;
3233 }
3234
3235 // Fall through to original instruction order.
3236 if (TryCand.SU->NodeNum < Cand.SU->NodeNum)
3237 TryCand.Reason = NodeOrder;
3238}
3239
3240void PostGenericScheduler::pickNodeFromQueue(SchedCandidate &Cand) {
3241 ReadyQueue &Q = Top.Available;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003242 for (ReadyQueue::iterator I = Q.begin(), E = Q.end(); I != E; ++I) {
3243 SchedCandidate TryCand(Cand.Policy);
3244 TryCand.SU = *I;
Matthias Braun6ad3d052016-06-25 00:23:00 +00003245 TryCand.AtTop = true;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003246 TryCand.initResourceDelta(DAG, SchedModel);
3247 tryCandidate(Cand, TryCand);
3248 if (TryCand.Reason != NoCand) {
3249 Cand.setBest(TryCand);
3250 DEBUG(traceCandidate(Cand));
3251 }
3252 }
3253}
3254
3255/// Pick the next node to schedule.
3256SUnit *PostGenericScheduler::pickNode(bool &IsTopNode) {
3257 if (DAG->top() == DAG->bottom()) {
3258 assert(Top.Available.empty() && Top.Pending.empty() && "ReadyQ garbage");
Craig Topperc0196b12014-04-14 00:51:57 +00003259 return nullptr;
Andrew Trickd14d7c22013-12-28 21:56:57 +00003260 }
3261 SUnit *SU;
3262 do {
3263 SU = Top.pickOnlyChoice();
Matthias Braun49cb6e92016-05-27 22:14:26 +00003264 if (SU) {
3265 tracePick(Only1, true);
3266 } else {
Andrew Trickd14d7c22013-12-28 21:56:57 +00003267 CandPolicy NoPolicy;
3268 SchedCandidate TopCand(NoPolicy);
3269 // Set the top-down policy based on the state of the current top zone and
3270 // the instructions outside the zone, including the bottom zone.
Craig Topperc0196b12014-04-14 00:51:57 +00003271 setPolicy(TopCand.Policy, /*IsPostRA=*/true, Top, nullptr);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003272 pickNodeFromQueue(TopCand);
3273 assert(TopCand.Reason != NoCand && "failed to find a candidate");
Matthias Braun6ad3d052016-06-25 00:23:00 +00003274 tracePick(TopCand);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003275 SU = TopCand.SU;
3276 }
3277 } while (SU->isScheduled);
3278
3279 IsTopNode = true;
3280 Top.removeReady(SU);
3281
3282 DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
3283 return SU;
3284}
3285
3286/// Called after ScheduleDAGMI has scheduled an instruction and updated
3287/// scheduled/remaining flags in the DAG nodes.
3288void PostGenericScheduler::schedNode(SUnit *SU, bool IsTopNode) {
3289 SU->TopReadyCycle = std::max(SU->TopReadyCycle, Top.getCurrCycle());
3290 Top.bumpNode(SU);
3291}
3292
3293/// Create a generic scheduler with no vreg liveness or DAG mutation passes.
3294static ScheduleDAGInstrs *createGenericSchedPostRA(MachineSchedContext *C) {
David Blaikie422b93d2014-04-21 20:32:32 +00003295 return new ScheduleDAGMI(C, make_unique<PostGenericScheduler>(C), /*IsPostRA=*/true);
Andrew Trickd14d7c22013-12-28 21:56:57 +00003296}

//===----------------------------------------------------------------------===//
// ILP Scheduler. Currently for experimental analysis of heuristics.
//===----------------------------------------------------------------------===//

namespace {
/// \brief Order nodes by the ILP metric.
struct ILPOrder {
  const SchedDFSResult *DFSResult;
  const BitVector *ScheduledTrees;
  bool MaximizeILP;

  ILPOrder(bool MaxILP)
    : DFSResult(nullptr), ScheduledTrees(nullptr), MaximizeILP(MaxILP) {}

  /// \brief Apply a less-than relation on node priority.
  ///
  /// (Return true if A comes after B in the Q.)
  bool operator()(const SUnit *A, const SUnit *B) const {
    unsigned SchedTreeA = DFSResult->getSubtreeID(A);
    unsigned SchedTreeB = DFSResult->getSubtreeID(B);
    if (SchedTreeA != SchedTreeB) {
      // Unscheduled trees have lower priority.
      if (ScheduledTrees->test(SchedTreeA) != ScheduledTrees->test(SchedTreeB))
        return ScheduledTrees->test(SchedTreeB);

      // Trees with shallower connections have lower priority.
      if (DFSResult->getSubtreeLevel(SchedTreeA)
          != DFSResult->getSubtreeLevel(SchedTreeB)) {
        return DFSResult->getSubtreeLevel(SchedTreeA)
          < DFSResult->getSubtreeLevel(SchedTreeB);
      }
    }
    if (MaximizeILP)
      return DFSResult->getILP(A) < DFSResult->getILP(B);
    else
      return DFSResult->getILP(A) > DFSResult->getILP(B);
  }
};
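
// Worked example with hypothetical DFS results: for A and B in the same
// scheduled subtree with getILP(A) == 3 and getILP(B) == 5, MaximizeILP makes
// operator()(A, B) return true (3 < 5), so A sorts lower and the heap in
// ILPScheduler pops B, the higher-ILP node, first; with MaximizeILP false the
// preference is inverted.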

/// \brief Schedule based on the ILP metric.
class ILPScheduler : public MachineSchedStrategy {
  ScheduleDAGMILive *DAG;
  ILPOrder Cmp;

  std::vector<SUnit*> ReadyQ;
public:
  ILPScheduler(bool MaximizeILP): DAG(nullptr), Cmp(MaximizeILP) {}

  void initialize(ScheduleDAGMI *dag) override {
    assert(dag->hasVRegLiveness() && "ILPScheduler needs vreg liveness");
    DAG = static_cast<ScheduleDAGMILive*>(dag);
    DAG->computeDFSResult();
    Cmp.DFSResult = DAG->getDFSResult();
    Cmp.ScheduledTrees = &DAG->getScheduledTrees();
    ReadyQ.clear();
  }

  void registerRoots() override {
    // Restore the heap in ReadyQ with the updated DFS results.
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  /// Callback to select the highest priority node from the ready Q.
  SUnit *pickNode(bool &IsTopNode) override {
    if (ReadyQ.empty()) return nullptr;
    std::pop_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
    SUnit *SU = ReadyQ.back();
    ReadyQ.pop_back();
    IsTopNode = false;
    DEBUG(dbgs() << "Pick node " << "SU(" << SU->NodeNum << ") "
          << " ILP: " << DAG->getDFSResult()->getILP(SU)
          << " Tree: " << DAG->getDFSResult()->getSubtreeID(SU) << " @"
          << DAG->getDFSResult()->getSubtreeLevel(
            DAG->getDFSResult()->getSubtreeID(SU)) << '\n'
          << "Scheduling " << *SU->getInstr());
    return SU;
  }

  /// \brief Scheduler callback to notify that a new subtree is scheduled.
  void scheduleTree(unsigned SubtreeID) override {
    std::make_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }

  /// Callback after a node is scheduled. The subtree bookkeeping and heap
  /// resort happen via scheduleTree(); here we only verify that scheduling
  /// proceeds bottom-up.
  void schedNode(SUnit *SU, bool IsTopNode) override {
    assert(!IsTopNode && "SchedDFSResult needs bottom-up");
  }

  void releaseTopNode(SUnit *) override { /*only called for top roots*/ }

  void releaseBottomNode(SUnit *SU) override {
    ReadyQ.push_back(SU);
    std::push_heap(ReadyQ.begin(), ReadyQ.end(), Cmp);
  }
};
} // namespace

static ScheduleDAGInstrs *createILPMaxScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(true));
}
static ScheduleDAGInstrs *createILPMinScheduler(MachineSchedContext *C) {
  return new ScheduleDAGMILive(C, make_unique<ILPScheduler>(false));
}
static MachineSchedRegistry ILPMaxRegistry(
  "ilpmax", "Schedule bottom-up for max ILP", createILPMaxScheduler);
static MachineSchedRegistry ILPMinRegistry(
  "ilpmin", "Schedule bottom-up for min ILP", createILPMinScheduler);

//===----------------------------------------------------------------------===//
// Machine Instruction Shuffler for Correctness Testing
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace {
/// Apply a less-than relation on the node order, which corresponds to the
/// instruction order prior to scheduling. IsReverse implements greater-than.
template<bool IsReverse>
struct SUnitOrder {
  bool operator()(SUnit *A, SUnit *B) const {
    if (IsReverse)
      return A->NodeNum > B->NodeNum;
    else
      return A->NodeNum < B->NodeNum;
  }
};
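
// Example of the resulting order with hypothetical node numbers: a
// PriorityQueue using SUnitOrder<false> holding SU(2), SU(9), and SU(5) pops
// SU(9) first, since a priority queue is a max-heap with respect to its
// less-than comparator; SUnitOrder<true> inverts this and pops SU(2) first.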

/// Reorder instructions as much as possible.
class InstructionShuffler : public MachineSchedStrategy {
  bool IsAlternating;
  bool IsTopDown;

  // Using a less-than relation (SUnitOrder<false>) for the TopQ priority
  // gives nodes with a higher number higher priority, causing the latest
  // instructions to be scheduled first.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<false> >
    TopQ;
  // When scheduling bottom-up, use greater-than as the queue priority.
  PriorityQueue<SUnit*, std::vector<SUnit*>, SUnitOrder<true> >
    BottomQ;
public:
  InstructionShuffler(bool alternate, bool topdown)
    : IsAlternating(alternate), IsTopDown(topdown) {}

  void initialize(ScheduleDAGMI*) override {
    TopQ.clear();
    BottomQ.clear();
  }

  /// Implement MachineSchedStrategy interface.
  /// -----------------------------------------

  SUnit *pickNode(bool &IsTopNode) override {
    SUnit *SU;
    if (IsTopDown) {
      do {
        if (TopQ.empty()) return nullptr;
        SU = TopQ.top();
        TopQ.pop();
      } while (SU->isScheduled);
      IsTopNode = true;
    } else {
      do {
        if (BottomQ.empty()) return nullptr;
        SU = BottomQ.top();
        BottomQ.pop();
      } while (SU->isScheduled);
      IsTopNode = false;
    }
    if (IsAlternating)
      IsTopDown = !IsTopDown;
    return SU;
  }

  void schedNode(SUnit *SU, bool IsTopNode) override {}

  void releaseTopNode(SUnit *SU) override {
    TopQ.push(SU);
  }
  void releaseBottomNode(SUnit *SU) override {
    BottomQ.push(SU);
  }
};
} // namespace

static ScheduleDAGInstrs *createInstructionShuffler(MachineSchedContext *C) {
  bool Alternate = !ForceTopDown && !ForceBottomUp;
  bool TopDown = !ForceBottomUp;
  assert((TopDown || !ForceTopDown) &&
         "-misched-topdown incompatible with -misched-bottomup");
  return new ScheduleDAGMILive(
      C, make_unique<InstructionShuffler>(Alternate, TopDown));
}
static MachineSchedRegistry ShufflerRegistry(
  "shuffle", "Shuffle machine instructions alternating directions",
  createInstructionShuffler);
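
// The shuffler is only built with asserts enabled, so it serves as a stress
// test for debug builds. Usage sketch (input file name illustrative):
//   llc -enable-misched -misched=shuffle foo.ll -o foo.s
// Adding -misched-topdown or -misched-bottomup pins a single direction
// instead of alternating between the two queues.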
#endif // !NDEBUG

//===----------------------------------------------------------------------===//
// GraphWriter support for ScheduleDAGMILive.
//===----------------------------------------------------------------------===//

#ifndef NDEBUG
namespace llvm {

template<> struct GraphTraits<
  ScheduleDAGMI*> : public GraphTraits<ScheduleDAG*> {};

template<>
struct DOTGraphTraits<ScheduleDAGMI*> : public DefaultDOTGraphTraits {

  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getGraphName(const ScheduleDAG *G) {
    return G->MF.getName();
  }

  static bool renderGraphFromBottomUp() {
    return true;
  }

  static bool isNodeHidden(const SUnit *Node) {
    if (ViewMISchedCutoff == 0)
      return false;
    return (Node->Preds.size() > ViewMISchedCutoff
            || Node->Succs.size() > ViewMISchedCutoff);
  }
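
  // For example, -view-misched-cutoff=10 hides every node with more than ten
  // predecessors or more than ten successors, keeping dense dependence webs
  // from overwhelming the rendered graph.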

  /// If you want to override the dot attributes printed for a particular
  /// edge, override this method.
  static std::string getEdgeAttributes(const SUnit *Node,
                                       SUnitIterator EI,
                                       const ScheduleDAG *Graph) {
    if (EI.isArtificialDep())
      return "color=cyan,style=dashed";
    if (EI.isCtrlDep())
      return "color=blue,style=dashed";
    return "";
  }

  static std::string getNodeLabel(const SUnit *SU, const ScheduleDAG *G) {
    std::string Str;
    raw_string_ostream SS(Str);
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    SS << "SU:" << SU->NodeNum;
    if (DFS)
      SS << " I:" << DFS->getNumInstrs(SU);
    return SS.str();
  }

  static std::string getNodeDescription(const SUnit *SU, const ScheduleDAG *G) {
    return G->getGraphNodeLabel(SU);
  }

  static std::string getNodeAttributes(const SUnit *N, const ScheduleDAG *G) {
    std::string Str("shape=Mrecord");
    const ScheduleDAGMI *DAG = static_cast<const ScheduleDAGMI*>(G);
    const SchedDFSResult *DFS = DAG->hasVRegLiveness() ?
      static_cast<const ScheduleDAGMILive*>(G)->getDFSResult() : nullptr;
    if (DFS) {
      Str += ",style=filled,fillcolor=\"#";
      Str += DOT::getColorString(DFS->getSubtreeID(N));
      Str += '"';
    }
    return Str;
  }
};
} // namespace llvm
#endif // NDEBUG

/// viewGraph - Pop up a ghostview window with the reachable parts of the DAG
/// rendered using 'dot'.
///
void ScheduleDAGMI::viewGraph(const Twine &Name, const Twine &Title) {
#ifndef NDEBUG
  ViewGraph(this, Name, false, Title);
#else
  errs() << "ScheduleDAGMI::viewGraph is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Out-of-line implementation with no arguments is handy for gdb.
void ScheduleDAGMI::viewGraph() {
  viewGraph(getDAGName(), "Scheduling-Units Graph for " + getDAGName());
}
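
// Usage sketch from a debugger session (debug build; the variable name DAG is
// illustrative and must be a ScheduleDAGMI* in scope, e.g. inside schedule()):
//   (gdb) call DAG->viewGraph()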