//===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
//===----------------------------------------------------------------------===//

#include "GCNSchedStrategy.h"
#include "AMDGPUSubtarget.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/RegisterClassInfo.h"
#include "llvm/Support/MathExtras.h"

#define DEBUG_TYPE "machine-scheduler"

using namespace llvm;

GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
    const MachineSchedContext *C) :
    GenericScheduler(C), TargetOccupancy(0), MF(nullptr) { }

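// Returns the maximum number of waves per execution unit the given SGPR and
// VGPR budgets allow, additionally limited by the LDS usage of MF.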
static unsigned getMaxWaves(unsigned SGPRs, unsigned VGPRs,
                            const MachineFunction &MF) {

  const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned MinRegOccupancy = std::min(ST.getOccupancyWithNumSGPRs(SGPRs),
                                      ST.getOccupancyWithNumVGPRs(VGPRs));
  return std::min(MinRegOccupancy,
                  ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
                                                  MF.getFunction()));
}

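// Record the register pressure limits the candidate heuristics compare
// against: the excess limits come from the number of allocatable SGPR32/VGPR32
// registers, while the critical limits follow the requested target occupancy
// or, on the first pass, the default pressure-set limits.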
void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
  GenericScheduler::initialize(DAG);

  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);

  MF = &DAG->MF;

  const SISubtarget &ST = MF->getSubtarget<SISubtarget>();

  // FIXME: This is also necessary, because some passes that run after
  // scheduling and before regalloc increase register pressure.
  const int ErrorMargin = 3;

  SGPRExcessLimit = Context->RegClassInfo
    ->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin;
  VGPRExcessLimit = Context->RegClassInfo
    ->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin;
  if (TargetOccupancy) {
    SGPRCriticalLimit = ST.getMaxNumSGPRs(TargetOccupancy, true);
    VGPRCriticalLimit = ST.getMaxNumVGPRs(TargetOccupancy);
  } else {
    SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
                                                    SRI->getSGPRPressureSet());
    VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
                                                    SRI->getVGPRPressureSet());
  }

  SGPRCriticalLimit -= ErrorMargin;
  VGPRCriticalLimit -= ErrorMargin;
}

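// Initialize Cand for SU: compute the SGPR/VGPR pressure the instruction would
// create at the current position and record it as an excess or critical
// pressure delta for the candidate comparison, without permanently modifying
// the pressure tracker.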
void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop, const RegPressureTracker &RPTracker,
                                     const SIRegisterInfo *SRI,
                                     unsigned SGPRPressure,
                                     unsigned VGPRPressure) {

  Cand.SU = SU;
  Cand.AtTop = AtTop;

  // getDownwardPressure() and getUpwardPressure() make temporary changes to
  // the tracker, so we need to pass those functions a non-const copy.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  std::vector<unsigned> Pressure;
  std::vector<unsigned> MaxPressure;

  if (AtTop)
    TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
  else {
    // FIXME: I think for bottom up scheduling, the register pressure is cached
    // and can be retrieved by DAG->getPressureDiff(SU).
    TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
  }

  unsigned NewSGPRPressure = Pressure[SRI->getSGPRPressureSet()];
  unsigned NewVGPRPressure = Pressure[SRI->getVGPRPressureSet()];

  // If two instructions increase the pressure of different register sets
  // by the same amount, the generic scheduler will prefer to schedule the
  // instruction that increases the set with the least amount of registers,
  // which in our case would be SGPRs. This is rarely what we want, so
  // when we report excess/critical register pressure, we do it either
  // only for VGPRs or only for SGPRs.

  // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
  const unsigned MaxVGPRPressureInc = 16;
  bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
  bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;

  // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
  // to increase the likelihood we don't go over the limits. We should improve
  // the analysis to look through dependencies to find the path with the least
  // register pressure.

  // We only need to update the RPDelta for instructions that increase
  // register pressure. Instructions that decrease or keep reg pressure
  // the same will be marked as RegExcess in tryCandidate() when they
  // are compared with instructions that increase the register pressure.
  if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(SRI->getVGPRPressureSet());
    Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
  }

  if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(SRI->getSGPRPressureSet());
    Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
  }

  // Register pressure is considered 'CRITICAL' if it is approaching a value
  // that would reduce the wave occupancy for the execution unit. When
  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
  // have the same cost, so we don't need to prefer one over the other.

  int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
  int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;

  if (SGPRDelta >= 0 || VGPRDelta >= 0) {
    if (SGPRDelta > VGPRDelta) {
      Cand.RPDelta.CriticalMax = PressureChange(SRI->getSGPRPressureSet());
      Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
    } else {
      Cand.RPDelta.CriticalMax = PressureChange(SRI->getVGPRPressureSet());
      Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeFromQueue()
void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
                                         const CandPolicy &ZonePolicy,
                                         const RegPressureTracker &RPTracker,
                                         SchedCandidate &Cand) {
  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
  unsigned SGPRPressure = Pressure[SRI->getSGPRPressureSet()];
  unsigned VGPRPressure = Pressure[SRI->getVGPRPressureSet()];
  ReadyQueue &Q = Zone.Available;
  for (SUnit *SU : Q) {

    SchedCandidate TryCand(ZonePolicy);
    initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
                  SGPRPressure, VGPRPressure);
    // Pass SchedBoundary only when comparing nodes from the same boundary.
    SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
    GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg);
    if (TryCand.Reason != NoCand) {
      // Initialize resource delta if needed in case future heuristics query it.
      if (TryCand.ResDelta == SchedResourceDelta())
        TryCand.initResourceDelta(Zone.DAG, SchedModel);
      Cand.setBest(TryCand);
    }
  }
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional()
SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  LLVM_DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(BotCand));
  }

  // Check if the top Q has a better candidate.
  LLVM_DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    LLVM_DEBUG(traceCandidate(TopCand));
  }

  // Pick best from BotCand and TopCand.
  LLVM_DEBUG(dbgs() << "Top Cand: "; traceCandidate(TopCand);
             dbgs() << "Bot Cand: "; traceCandidate(BotCand););
  SchedCandidate Cand;
  if (TopCand.Reason == BotCand.Reason) {
    Cand = BotCand;
    GenericSchedulerBase::CandReason TopReason = TopCand.Reason;
    TopCand.Reason = NoCand;
    GenericScheduler::tryCandidate(Cand, TopCand, nullptr);
    if (TopCand.Reason != NoCand) {
      Cand.setBest(TopCand);
    } else {
      TopCand.Reason = TopReason;
    }
  } else {
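    // The zones disagree: prefer the candidate whose reason does not imply an
    // actual increase in excess or critical register pressure; otherwise fall
    // back to the candidate whose reason ranks higher in GenericScheduler's
    // priority order.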
    if (TopCand.Reason == RegExcess && TopCand.RPDelta.Excess.getUnitInc() <= 0) {
      Cand = TopCand;
    } else if (BotCand.Reason == RegExcess && BotCand.RPDelta.Excess.getUnitInc() <= 0) {
      Cand = BotCand;
    } else if (TopCand.Reason == RegCritical && TopCand.RPDelta.CriticalMax.getUnitInc() <= 0) {
      Cand = TopCand;
    } else if (BotCand.Reason == RegCritical && BotCand.RPDelta.CriticalMax.getUnitInc() <= 0) {
      Cand = BotCand;
    } else {
      if (BotCand.Reason > TopCand.Reason) {
        Cand = TopCand;
      } else {
        Cand = BotCand;
      }
    }
  }
  LLVM_DEBUG(dbgs() << "Picking: "; traceCandidate(Cand););

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}

// This function is mostly cut and pasted from
// GenericScheduler::pickNode()
SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled);

  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  LLVM_DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") "
                    << *SU->getInstr());
  return SU;
}

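// GCNScheduleDAGMILive records the scheduling regions of the function and the
// per-region pressure and live-in sets needed for the two-stage scheduling
// performed in finalizeSchedule().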
GCNScheduleDAGMILive::GCNScheduleDAGMILive(MachineSchedContext *C,
                        std::unique_ptr<MachineSchedStrategy> S) :
  ScheduleDAGMILive(C, std::move(S)),
  ST(MF.getSubtarget<SISubtarget>()),
  MFI(*MF.getInfo<SIMachineFunctionInfo>()),
  StartingOccupancy(MFI.getOccupancy()),
  MinOccupancy(StartingOccupancy), Stage(0), RegionIdx(0) {

  LLVM_DEBUG(dbgs() << "Starting occupancy is " << StartingOccupancy << ".\n");
}

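// Schedule the current region. During the region-recording pass (Stage 0) this
// only remembers the region boundaries; on later stages the region is actually
// scheduled and, if the resulting register pressure would drop occupancy below
// the minimum recorded for the function, the original instruction order is
// restored.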
void GCNScheduleDAGMILive::schedule() {
  if (Stage == 0) {
    // Just record regions during the first pass.
    Regions.push_back(std::make_pair(RegionBegin, RegionEnd));
    return;
  }

  std::vector<MachineInstr*> Unsched;
  Unsched.reserve(NumRegionInstrs);
  for (auto &I : *this) {
    Unsched.push_back(&I);
  }

  GCNRegPressure PressureBefore;
  if (LIS) {
    PressureBefore = Pressure[RegionIdx];

    LLVM_DEBUG(dbgs() << "Pressure before scheduling:\nRegion live-ins:";
               GCNRPTracker::printLiveRegs(dbgs(), LiveIns[RegionIdx], MRI);
               dbgs() << "Region live-in pressure: ";
               llvm::getRegPressure(MRI, LiveIns[RegionIdx]).print(dbgs());
               dbgs() << "Region register pressure: ";
               PressureBefore.print(dbgs()));
  }

  ScheduleDAGMILive::schedule();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);

  if (!LIS)
    return;

  // Check the results of scheduling.
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  auto PressureAfter = getRealRegPressure();

  LLVM_DEBUG(dbgs() << "Pressure after scheduling: ";
             PressureAfter.print(dbgs()));

  if (PressureAfter.getSGPRNum() <= S.SGPRCriticalLimit &&
      PressureAfter.getVGPRNum() <= S.VGPRCriticalLimit) {
    Pressure[RegionIdx] = PressureAfter;
    LLVM_DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }
  unsigned WavesAfter = getMaxWaves(PressureAfter.getSGPRNum(),
                                    PressureAfter.getVGPRNum(), MF);
  unsigned WavesBefore = getMaxWaves(PressureBefore.getSGPRNum(),
                                     PressureBefore.getVGPRNum(), MF);
  WavesAfter = std::min(WavesAfter, MFI.getMaxWavesPerEU());
  WavesBefore = std::min(WavesBefore, MFI.getMaxWavesPerEU());
  LLVM_DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore
                    << ", after " << WavesAfter << ".\n");


  // We could not keep the current target occupancy because of the region just
  // scheduled. Record the new occupancy for the next scheduling cycle.
  unsigned NewOccupancy = std::max(WavesAfter, WavesBefore);
  // Allow memory bound functions to drop to 4 waves if not limited by an
  // attribute.
  if (WavesAfter < WavesBefore && WavesAfter < MinOccupancy &&
      WavesAfter >= MFI.getMinAllowedOccupancy()) {
    LLVM_DEBUG(dbgs() << "Function is memory bound, allow occupancy drop up to "
                      << MFI.getMinAllowedOccupancy() << " waves\n");
    NewOccupancy = WavesAfter;
  }
  if (NewOccupancy < MinOccupancy) {
    MinOccupancy = NewOccupancy;
    MFI.limitOccupancy(MinOccupancy);
    LLVM_DEBUG(dbgs() << "Occupancy lowered for the function to "
                      << MinOccupancy << ".\n");
  }

  if (WavesAfter >= MinOccupancy) {
    Pressure[RegionIdx] = PressureAfter;
    return;
  }

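  // Occupancy could not be preserved: move the instructions back into their
  // original order and recompute the liveness-related operand flags.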
  LLVM_DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  RegionEnd = RegionBegin;
  for (MachineInstr *MI : Unsched) {
    if (MI->isDebugInstr())
      continue;

    if (MI->getIterator() != RegionEnd) {
      BB->remove(MI);
      BB->insert(RegionEnd, MI);
      if (!MI->isDebugInstr())
        LIS->handleMove(*MI, true);
    }
    // Reset read-undef flags and update them later.
    for (auto &Op : MI->operands())
      if (Op.isReg() && Op.isDef())
        Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
    if (!MI->isDebugInstr()) {
      if (ShouldTrackLaneMasks) {
        // Adjust liveness and add missing dead+read-undef flags.
        SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
        RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
      } else {
        // Adjust for missing dead-def flags.
        RegOpers.detectDeadDefs(*MI, *LIS);
      }
    }
    RegionEnd = MI->getIterator();
    ++RegionEnd;
    LLVM_DEBUG(dbgs() << "Scheduling " << *MI);
  }
  RegionBegin = Unsched.front()->getIterator();
  Regions[RegionIdx] = std::make_pair(RegionBegin, RegionEnd);

  placeDebugValues();
}

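// Recompute the maximum register pressure of the current region from live
// interval information, starting from the region's cached live-in set.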
GCNRegPressure GCNScheduleDAGMILive::getRealRegPressure() const {
  GCNDownwardRPTracker RPTracker(*LIS);
  RPTracker.advance(begin(), end(), &LiveIns[RegionIdx]);
  return RPTracker.moveMaxPressure();
}

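// Sweep MBB once with a downward RP tracker, recording the live-in set and the
// maximum register pressure of every scheduling region in the block. If the
// block has a single successor that is scheduled later, its live-outs are
// cached in MBBLiveIns so they can be reused as that successor's live-ins.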
void GCNScheduleDAGMILive::computeBlockPressure(const MachineBasicBlock *MBB) {
  GCNDownwardRPTracker RPTracker(*LIS);

  // If the block has only one successor then the live-ins of that successor
  // are the live-outs of the current block. We can reuse the calculated live
  // set if the successor will be sent to scheduling after the current block.
  const MachineBasicBlock *OnlySucc = nullptr;
  if (MBB->succ_size() == 1 && !(*MBB->succ_begin())->empty()) {
    SlotIndexes *Ind = LIS->getSlotIndexes();
    if (Ind->getMBBStartIdx(MBB) < Ind->getMBBStartIdx(*MBB->succ_begin()))
      OnlySucc = *MBB->succ_begin();
  }

  // Scheduler sends regions from the end of the block upwards.
  size_t CurRegion = RegionIdx;
  for (size_t E = Regions.size(); CurRegion != E; ++CurRegion)
    if (Regions[CurRegion].first->getParent() != MBB)
      break;
  --CurRegion;

  auto I = MBB->begin();
  auto LiveInIt = MBBLiveIns.find(MBB);
  if (LiveInIt != MBBLiveIns.end()) {
    auto LiveIn = std::move(LiveInIt->second);
    RPTracker.reset(*MBB->begin(), &LiveIn);
    MBBLiveIns.erase(LiveInIt);
  } else {
    I = Regions[CurRegion].first;
    RPTracker.reset(*I);
  }

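  // Advance the tracker down the block: capture a region's live-in set at its
  // first instruction and its maximum pressure once its end is reached, and
  // stop after the bottom-most region of this block (RegionIdx) has been
  // recorded.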
  for ( ; ; ) {
    I = RPTracker.getNext();

    if (Regions[CurRegion].first == I) {
      LiveIns[CurRegion] = RPTracker.getLiveRegs();
      RPTracker.clearMaxPressure();
    }

    if (Regions[CurRegion].second == I) {
      Pressure[CurRegion] = RPTracker.moveMaxPressure();
      if (CurRegion-- == RegionIdx)
        break;
    }
    RPTracker.advanceToNext();
    RPTracker.advanceBeforeNext();
  }

  if (OnlySucc) {
    if (I != MBB->end()) {
      RPTracker.advanceToNext();
      RPTracker.advance(MBB->end());
    }
    RPTracker.reset(*OnlySucc->begin(), &RPTracker.getLiveRegs());
    RPTracker.advanceBeforeNext();
    MBBLiveIns[OnlySucc] = RPTracker.moveLiveRegs();
  }
}

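// Run the actual scheduling. The scheduler pass first calls schedule() for
// every region with Stage == 0, which only records the regions; stage 1 then
// schedules all recorded regions (computing block pressure along the way), and
// stage 2 reschedules the function with the lowered target occupancy if the
// achieved occupancy dropped below the starting one.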
void GCNScheduleDAGMILive::finalizeSchedule() {
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  LLVM_DEBUG(dbgs() << "All regions recorded, starting actual scheduling.\n");

  LiveIns.resize(Regions.size());
  Pressure.resize(Regions.size());

  do {
    Stage++;
    RegionIdx = 0;
    MachineBasicBlock *MBB = nullptr;

    if (Stage > 1) {
      // Retry function scheduling if we found a resulting occupancy that is
      // lower than the one used for the first pass. This gives more freedom
      // to schedule low register pressure blocks.
      // Code is partially copied from MachineSchedulerBase::scheduleRegions().

      if (!LIS || StartingOccupancy <= MinOccupancy)
        break;

      LLVM_DEBUG(
          dbgs()
          << "Retrying function scheduling with lowest recorded occupancy "
          << MinOccupancy << ".\n");

      S.setTargetOccupancy(MinOccupancy);
    }

    for (auto Region : Regions) {
      RegionBegin = Region.first;
      RegionEnd = Region.second;

      if (RegionBegin->getParent() != MBB) {
        if (MBB) finishBlock();
        MBB = RegionBegin->getParent();
        startBlock(MBB);
        if (Stage == 1)
          computeBlockPressure(MBB);
      }

      unsigned NumRegionInstrs = std::distance(begin(), end());
      enterRegion(MBB, begin(), end(), NumRegionInstrs);

      // Skip empty scheduling regions (0 or 1 schedulable instructions).
      if (begin() == end() || begin() == std::prev(end())) {
        exitRegion();
        continue;
      }

      LLVM_DEBUG(dbgs() << "********** MI Scheduling **********\n");
      LLVM_DEBUG(dbgs() << MF.getName() << ":" << printMBBReference(*MBB) << " "
                        << MBB->getName() << "\n From: " << *begin()
                        << " To: ";
                 if (RegionEnd != MBB->end()) dbgs() << *RegionEnd;
                 else dbgs() << "End";
                 dbgs() << " RegionInstrs: " << NumRegionInstrs << '\n');

      schedule();

      exitRegion();
      ++RegionIdx;
    }
    finishBlock();

  } while (Stage < 2);
}