blob: 758a3ad4ad53bbe3757daa27fd9363e234746520 [file] [log] [blame]
//===-- GCNSchedStrategy.cpp - GCN Scheduler Strategy ---------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This contains a MachineSchedStrategy implementation for maximizing wave
/// occupancy on GCN hardware.
//===----------------------------------------------------------------------===//
14
15#include "GCNSchedStrategy.h"
16#include "AMDGPUSubtarget.h"
17#include "SIInstrInfo.h"
18#include "SIMachineFunctionInfo.h"
19#include "SIRegisterInfo.h"
20#include "llvm/CodeGen/RegisterClassInfo.h"
Stanislav Mekhanoshin282e8e42017-02-28 17:22:39 +000021#include "llvm/Support/MathExtras.h"
Tom Stellard0d23ebe2016-08-29 19:42:52 +000022
23#define DEBUG_TYPE "misched"
24
25using namespace llvm;
26
27GCNMaxOccupancySchedStrategy::GCNMaxOccupancySchedStrategy(
28 const MachineSchedContext *C) :
29 GenericScheduler(C) { }
30
31static unsigned getMaxWaves(unsigned SGPRs, unsigned VGPRs,
32 const MachineFunction &MF) {
33
34 const SISubtarget &ST = MF.getSubtarget<SISubtarget>();
35 const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
36 unsigned MinRegOccupancy = std::min(ST.getOccupancyWithNumSGPRs(SGPRs),
37 ST.getOccupancyWithNumVGPRs(VGPRs));
38 return std::min(MinRegOccupancy,
Stanislav Mekhanoshin2b913b12017-02-01 22:59:50 +000039 ST.getOccupancyWithLocalMemSize(MFI->getLDSSize(),
40 *MF.getFunction()));
Tom Stellard0d23ebe2016-08-29 19:42:52 +000041}
42
Stanislav Mekhanoshin582a5232017-02-15 17:19:50 +000043void GCNMaxOccupancySchedStrategy::initialize(ScheduleDAGMI *DAG) {
44 GenericScheduler::initialize(DAG);
45
46 const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
47
48 // FIXME: This is also necessary, because some passes that run after
49 // scheduling and before regalloc increase register pressure.
50 const int ErrorMargin = 3;
51
52 SGPRExcessLimit = Context->RegClassInfo
53 ->getNumAllocatableRegs(&AMDGPU::SGPR_32RegClass) - ErrorMargin;
54 VGPRExcessLimit = Context->RegClassInfo
55 ->getNumAllocatableRegs(&AMDGPU::VGPR_32RegClass) - ErrorMargin;
56 SGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
57 SRI->getSGPRPressureSet()) - ErrorMargin;
58 VGPRCriticalLimit = SRI->getRegPressureSetLimit(DAG->MF,
59 SRI->getVGPRPressureSet()) - ErrorMargin;
60}
61
/// Initialize a scheduling candidate for \p SU, recording the register
/// pressure changes (RPDelta) the instruction would cause so tryCandidate()
/// can compare candidates by pressure impact.
///
/// \param Cand          candidate to fill in.
/// \param SU            the unit being considered.
/// \param AtTop         true if picking from the top boundary.
/// \param RPTracker     tracker positioned at the current boundary.
/// \param SGPRPressure  current SGPR pressure at the boundary.
/// \param VGPRPressure  current VGPR pressure at the boundary.
void GCNMaxOccupancySchedStrategy::initCandidate(SchedCandidate &Cand, SUnit *SU,
                                     bool AtTop, const RegPressureTracker &RPTracker,
                                     const SIRegisterInfo *SRI,
                                     unsigned SGPRPressure,
                                     unsigned VGPRPressure) {

  Cand.SU = SU;
  Cand.AtTop = AtTop;

  // getDownwardPressure() and getUpwardPressure() make temporary changes to
  // the tracker, so we need to pass those functions a non-const copy.
  RegPressureTracker &TempTracker = const_cast<RegPressureTracker&>(RPTracker);

  std::vector<unsigned> Pressure;
  std::vector<unsigned> MaxPressure;

  if (AtTop)
    TempTracker.getDownwardPressure(SU->getInstr(), Pressure, MaxPressure);
  else {
    // FIXME: I think for bottom up scheduling, the register pressure is cached
    // and can be retrieved by DAG->getPressureDiff(SU).
    TempTracker.getUpwardPressure(SU->getInstr(), Pressure, MaxPressure);
  }

  // Pressure the region would have after scheduling SU at this boundary.
  unsigned NewSGPRPressure = Pressure[SRI->getSGPRPressureSet()];
  unsigned NewVGPRPressure = Pressure[SRI->getVGPRPressureSet()];

  // If two instructions increase the pressure of different register sets
  // by the same amount, the generic scheduler will prefer to schedule the
  // instruction that increases the set with the least amount of registers,
  // which in our case would be SGPRs. This is rarely what we want, so
  // when we report excess/critical register pressure, we do it either
  // only for VGPRs or only for SGPRs.

  // FIXME: Better heuristics to determine whether to prefer SGPRs or VGPRs.
  const unsigned MaxVGPRPressureInc = 16;
  bool ShouldTrackVGPRs = VGPRPressure + MaxVGPRPressureInc >= VGPRExcessLimit;
  bool ShouldTrackSGPRs = !ShouldTrackVGPRs && SGPRPressure >= SGPRExcessLimit;


  // FIXME: We have to enter REG-EXCESS before we reach the actual threshold
  // to increase the likelihood we don't go over the limits. We should improve
  // the analysis to look through dependencies to find the path with the least
  // register pressure.

  // We only need to update the RPDelta for instructions that increase
  // register pressure. Instructions that decrease or keep reg pressure
  // the same will be marked as RegExcess in tryCandidate() when they
  // are compared with instructions that increase the register pressure.
  if (ShouldTrackVGPRs && NewVGPRPressure >= VGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(SRI->getVGPRPressureSet());
    Cand.RPDelta.Excess.setUnitInc(NewVGPRPressure - VGPRExcessLimit);
  }

  if (ShouldTrackSGPRs && NewSGPRPressure >= SGPRExcessLimit) {
    Cand.RPDelta.Excess = PressureChange(SRI->getSGPRPressureSet());
    Cand.RPDelta.Excess.setUnitInc(NewSGPRPressure - SGPRExcessLimit);
  }

  // Register pressure is considered 'CRITICAL' if it is approaching a value
  // that would reduce the wave occupancy for the execution unit. When
  // register pressure is 'CRITICAL', increasing SGPR and VGPR pressure both
  // has the same cost, so we don't need to prefer one over the other.

  int SGPRDelta = NewSGPRPressure - SGPRCriticalLimit;
  int VGPRDelta = NewVGPRPressure - VGPRCriticalLimit;

  if (SGPRDelta >= 0 || VGPRDelta >= 0) {
    // Report whichever register file is closer to (or further past) its
    // occupancy-reducing limit.
    if (SGPRDelta > VGPRDelta) {
      Cand.RPDelta.CriticalMax = PressureChange(SRI->getSGPRPressureSet());
      Cand.RPDelta.CriticalMax.setUnitInc(SGPRDelta);
    } else {
      Cand.RPDelta.CriticalMax = PressureChange(SRI->getVGPRPressureSet());
      Cand.RPDelta.CriticalMax.setUnitInc(VGPRDelta);
    }
  }
}
139
140// This function is mostly cut and pasted from
141// GenericScheduler::pickNodeFromQueue()
142void GCNMaxOccupancySchedStrategy::pickNodeFromQueue(SchedBoundary &Zone,
143 const CandPolicy &ZonePolicy,
144 const RegPressureTracker &RPTracker,
145 SchedCandidate &Cand) {
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000146 const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
147 ArrayRef<unsigned> Pressure = RPTracker.getRegSetPressureAtPos();
148 unsigned SGPRPressure = Pressure[SRI->getSGPRPressureSet()];
149 unsigned VGPRPressure = Pressure[SRI->getVGPRPressureSet()];
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000150 ReadyQueue &Q = Zone.Available;
151 for (SUnit *SU : Q) {
152
153 SchedCandidate TryCand(ZonePolicy);
154 initCandidate(TryCand, SU, Zone.isTop(), RPTracker, SRI,
Stanislav Mekhanoshin582a5232017-02-15 17:19:50 +0000155 SGPRPressure, VGPRPressure);
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000156 // Pass SchedBoundary only when comparing nodes from the same boundary.
157 SchedBoundary *ZoneArg = Cand.AtTop == TryCand.AtTop ? &Zone : nullptr;
158 GenericScheduler::tryCandidate(Cand, TryCand, ZoneArg);
159 if (TryCand.Reason != NoCand) {
160 // Initialize resource delta if needed in case future heuristics query it.
161 if (TryCand.ResDelta == SchedResourceDelta())
162 TryCand.initResourceDelta(Zone.DAG, SchedModel);
163 Cand.setBest(TryCand);
164 }
165 }
166}
167
168static int getBidirectionalReasonRank(GenericSchedulerBase::CandReason Reason) {
169 switch (Reason) {
170 default:
171 return Reason;
172 case GenericSchedulerBase::RegCritical:
173 case GenericSchedulerBase::RegExcess:
174 return -Reason;
175 }
176}
177
// This function is mostly cut and pasted from
// GenericScheduler::pickNodeBidirectional()
SUnit *GCNMaxOccupancySchedStrategy::pickNodeBidirectional(bool &IsTopNode) {
  // Schedule as far as possible in the direction of no choice. This is most
  // efficient, but also provides the best heuristics for CriticalPSets.
  if (SUnit *SU = Bot.pickOnlyChoice()) {
    IsTopNode = false;
    return SU;
  }
  if (SUnit *SU = Top.pickOnlyChoice()) {
    IsTopNode = true;
    return SU;
  }
  // Set the bottom-up policy based on the state of the current bottom zone and
  // the instructions outside the zone, including the top zone.
  CandPolicy BotPolicy;
  setPolicy(BotPolicy, /*IsPostRA=*/false, Bot, &Top);
  // Set the top-down policy based on the state of the current top zone and
  // the instructions outside the zone, including the bottom zone.
  CandPolicy TopPolicy;
  setPolicy(TopPolicy, /*IsPostRA=*/false, Top, &Bot);

  // See if BotCand is still valid (because we previously scheduled from Top).
  DEBUG(dbgs() << "Picking from Bot:\n");
  if (!BotCand.isValid() || BotCand.SU->isScheduled ||
      BotCand.Policy != BotPolicy) {
    BotCand.reset(CandPolicy());
    pickNodeFromQueue(Bot, BotPolicy, DAG->getBotRPTracker(), BotCand);
    assert(BotCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    DEBUG(traceCandidate(BotCand));
  }

  // Check if the top Q has a better candidate.
  DEBUG(dbgs() << "Picking from Top:\n");
  if (!TopCand.isValid() || TopCand.SU->isScheduled ||
      TopCand.Policy != TopPolicy) {
    TopCand.reset(CandPolicy());
    pickNodeFromQueue(Top, TopPolicy, DAG->getTopRPTracker(), TopCand);
    assert(TopCand.Reason != NoCand && "failed to find the first candidate");
  } else {
    DEBUG(traceCandidate(TopCand));
  }

  // Pick best from BotCand and TopCand.
  DEBUG(
    dbgs() << "Top Cand: ";
    traceCandidate(TopCand);
    dbgs() << "Bot Cand: ";
    traceCandidate(BotCand);
  );
  SchedCandidate Cand;
  if (TopCand.Reason == BotCand.Reason) {
    // Same reason on both sides: let the generic heuristic break the tie.
    // TopCand.Reason is temporarily cleared so tryCandidate() can report
    // (via a non-NoCand Reason) whether TopCand beats BotCand; restore it
    // if TopCand loses.
    Cand = BotCand;
    GenericSchedulerBase::CandReason TopReason = TopCand.Reason;
    TopCand.Reason = NoCand;
    GenericScheduler::tryCandidate(Cand, TopCand, nullptr);
    if (TopCand.Reason != NoCand) {
      Cand.setBest(TopCand);
    } else {
      TopCand.Reason = TopReason;
    }
  } else {
    // Different reasons: first prefer any candidate whose pressure delta is
    // non-positive (it does not make excess/critical pressure worse), then
    // fall back to the reason rank, where pressure reasons rank lowest.
    if (TopCand.Reason == RegExcess && TopCand.RPDelta.Excess.getUnitInc() <= 0) {
      Cand = TopCand;
    } else if (BotCand.Reason == RegExcess && BotCand.RPDelta.Excess.getUnitInc() <= 0) {
      Cand = BotCand;
    } else if (TopCand.Reason == RegCritical && TopCand.RPDelta.CriticalMax.getUnitInc() <= 0) {
      Cand = TopCand;
    } else if (BotCand.Reason == RegCritical && BotCand.RPDelta.CriticalMax.getUnitInc() <= 0) {
      Cand = BotCand;
    } else {
      int TopRank = getBidirectionalReasonRank(TopCand.Reason);
      int BotRank = getBidirectionalReasonRank(BotCand.Reason);
      if (TopRank > BotRank) {
        Cand = TopCand;
      } else {
        Cand = BotCand;
      }
    }
  }
  DEBUG(
    dbgs() << "Picking: ";
    traceCandidate(Cand);
  );

  IsTopNode = Cand.AtTop;
  return Cand.SU;
}
267
// This function is mostly cut and pasted from
// GenericScheduler::pickNode()
SUnit *GCNMaxOccupancySchedStrategy::pickNode(bool &IsTopNode) {
  if (DAG->top() == DAG->bottom()) {
    // Region fully scheduled; both zones must have drained.
    assert(Top.Available.empty() && Top.Pending.empty() &&
           Bot.Available.empty() && Bot.Pending.empty() && "ReadyQ garbage");
    return nullptr;
  }
  SUnit *SU;
  do {
    if (RegionPolicy.OnlyTopDown) {
      SU = Top.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        TopCand.reset(NoPolicy);
        pickNodeFromQueue(Top, NoPolicy, DAG->getTopRPTracker(), TopCand);
        assert(TopCand.Reason != NoCand && "failed to find a candidate");
        SU = TopCand.SU;
      }
      IsTopNode = true;
    } else if (RegionPolicy.OnlyBottomUp) {
      SU = Bot.pickOnlyChoice();
      if (!SU) {
        CandPolicy NoPolicy;
        BotCand.reset(NoPolicy);
        pickNodeFromQueue(Bot, NoPolicy, DAG->getBotRPTracker(), BotCand);
        assert(BotCand.Reason != NoCand && "failed to find a candidate");
        SU = BotCand.SU;
      }
      IsTopNode = false;
    } else {
      SU = pickNodeBidirectional(IsTopNode);
    }
  } while (SU->isScheduled); // Queues may still hold already-scheduled units.

  // A unit can be ready at both boundaries; remove it from each.
  if (SU->isTopReady())
    Top.removeReady(SU);
  if (SU->isBottomReady())
    Bot.removeReady(SU);

  DEBUG(dbgs() << "Scheduling SU(" << SU->NodeNum << ") " << *SU->getInstr());
  return SU;
}
Stanislav Mekhanoshin582a5232017-02-15 17:19:50 +0000311
/// Schedule the current region, then compare the real register pressure
/// before and after. If scheduling lowered the achievable wave occupancy,
/// revert the region to its original instruction order.
void GCNScheduleDAGMILive::schedule() {
  // Remember the original order so the region can be restored.
  std::vector<MachineInstr*> Unsched;
  Unsched.reserve(NumRegionInstrs);
  for (auto &I : *this)
    Unsched.push_back(&I);

  std::pair<unsigned, unsigned> PressureBefore;
  if (LIS) {
    DEBUG(dbgs() << "Pressure before scheduling:\n");
    discoverLiveIns();
    PressureBefore = getRealRegPressure();
  }

  ScheduleDAGMILive::schedule();
  // Without LiveIntervals we cannot measure pressure, so there is nothing
  // to check or revert.
  if (!LIS)
    return;

  // Check the results of scheduling.
  GCNMaxOccupancySchedStrategy &S = (GCNMaxOccupancySchedStrategy&)*SchedImpl;
  DEBUG(dbgs() << "Pressure after scheduling:\n");
  auto PressureAfter = getRealRegPressure();
  LiveIns.clear();

  if (PressureAfter.first <= S.SGPRCriticalLimit &&
      PressureAfter.second <= S.VGPRCriticalLimit) {
    DEBUG(dbgs() << "Pressure in desired limits, done.\n");
    return;
  }
  unsigned WavesAfter = getMaxWaves(PressureAfter.first,
                                    PressureAfter.second, MF);
  unsigned WavesBefore = getMaxWaves(PressureBefore.first,
                                     PressureBefore.second, MF);
  DEBUG(dbgs() << "Occupancy before scheduling: " << WavesBefore <<
        ", after " << WavesAfter << ".\n");

  // Keep the new schedule unless it actually reduced occupancy.
  if (WavesAfter >= WavesBefore)
    return;

  DEBUG(dbgs() << "Attempting to revert scheduling.\n");
  RegionEnd = RegionBegin;
  for (MachineInstr *MI : Unsched) {
    if (MI->getIterator() != RegionEnd) {
      // Move MI back to its original position and update live intervals.
      BB->remove(MI);
      BB->insert(RegionEnd, MI);
      LIS->handleMove(*MI, true);
    }
    // Reset read-undef flags and update them later.
    for (auto &Op : MI->operands())
      if (Op.isReg() && Op.isDef())
        Op.setIsUndef(false);
    RegisterOperands RegOpers;
    RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
    if (ShouldTrackLaneMasks) {
      // Adjust liveness and add missing dead+read-undef flags.
      SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
      RegOpers.adjustLaneLiveness(*LIS, MRI, SlotIdx, MI);
    } else {
      // Adjust for missing dead-def flags.
      RegOpers.detectDeadDefs(*MI, *LIS);
    }
    RegionEnd = MI->getIterator();
    ++RegionEnd;
    DEBUG(dbgs() << "Scheduling " << *MI);
  }
  RegionBegin = Unsched.front()->getIterator();

  placeDebugValues();
}
Stanislav Mekhanoshin282e8e42017-02-28 17:22:39 +0000380
381static inline void setMask(const MachineRegisterInfo &MRI,
382 const SIRegisterInfo *SRI, unsigned Reg,
383 LaneBitmask &PrevMask, LaneBitmask NewMask,
384 unsigned &SGPRs, unsigned &VGPRs) {
385 int NewRegs = countPopulation(NewMask.getAsInteger()) -
386 countPopulation(PrevMask.getAsInteger());
387 if (SRI->isSGPRReg(MRI, Reg))
388 SGPRs += NewRegs;
389 if (SRI->isVGPR(MRI, Reg))
390 VGPRs += NewRegs;
391 assert ((int)SGPRs >= 0 && (int)VGPRs >= 0);
392 PrevMask = NewMask;
393}
394
/// Collect the virtual registers (and their live lane masks) that are live
/// into the current region, populating LiveIns and recording the resulting
/// SGPR/VGPR pressure in LiveInPressure.
void GCNScheduleDAGMILive::discoverLiveIns() {
  unsigned SGPRs = 0;
  unsigned VGPRs = 0;

  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  // Query liveness at the slot of the region's first instruction.
  SlotIndex SI = LIS->getInstructionIndex(*begin()).getBaseIndex();
  assert (SI.isValid());

  DEBUG(dbgs() << "Region live-ins:");
  // Scan every virtual register in the function for liveness at SI.
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(I);
    if (MRI.reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = LIS->getInterval(Reg);
    LaneBitmask LaneMask = LaneBitmask::getNone();
    if (LI.hasSubRanges()) {
      // Accumulate only the subregister lanes that are actually live.
      for (const auto &S : LI.subranges())
        if (S.liveAt(SI))
          LaneMask |= S.LaneMask;
    } else if (LI.liveAt(SI)) {
      // No subrange info: the whole register is live.
      LaneMask = MRI.getMaxLaneMaskForVReg(Reg);
    }

    if (LaneMask.any()) {
      // LiveIns[Reg] default-constructs to an empty mask, so this also
      // counts the register's lanes into SGPRs/VGPRs.
      setMask(MRI, SRI, Reg, LiveIns[Reg], LaneMask, SGPRs, VGPRs);

      DEBUG(dbgs() << ' ' << PrintVRegOrUnit(Reg, SRI) << ':'
                   << PrintLaneMask(LiveIns[Reg]));
    }
  }

  LiveInPressure = std::make_pair(SGPRs, VGPRs);

  DEBUG(dbgs() << "\nLive-in pressure:\nSGPR = " << SGPRs
               << "\nVGPR = " << VGPRs << '\n');
}
431
/// Walk the region top-down, maintaining the set of live registers (seeded
/// from LiveIns) and tracking the maximum SGPR and VGPR pressure reached.
///
/// \return a (max SGPR pressure, max VGPR pressure) pair for the region.
std::pair<unsigned, unsigned>
GCNScheduleDAGMILive::getRealRegPressure() const {
  unsigned SGPRs, MaxSGPRs, VGPRs, MaxVGPRs;
  // Start from the pressure contributed by the region's live-ins.
  SGPRs = MaxSGPRs = LiveInPressure.first;
  VGPRs = MaxVGPRs = LiveInPressure.second;

  const SIRegisterInfo *SRI = static_cast<const SIRegisterInfo*>(TRI);
  // Work on a copy so the cached live-in map is not modified.
  DenseMap<unsigned, LaneBitmask> LiveRegs(LiveIns);

  for (const MachineInstr &MI : *this) {
    if (MI.isDebugValue())
      continue;
    SlotIndex SI = LIS->getInstructionIndex(MI).getBaseIndex();
    assert (SI.isValid());

    // Remove dead registers or mask bits.
    for (auto &It : LiveRegs) {
      if (It.second.none())
        continue;
      const LiveInterval &LI = LIS->getInterval(It.first);
      if (LI.hasSubRanges()) {
        // Clear only the lanes whose subranges are no longer live here.
        for (const auto &S : LI.subranges())
          if (!S.liveAt(SI))
            setMask(MRI, SRI, It.first, It.second, It.second & ~S.LaneMask,
                    SGPRs, VGPRs);
      } else if (!LI.liveAt(SI)) {
        setMask(MRI, SRI, It.first, It.second, LaneBitmask::getNone(),
                SGPRs, VGPRs);
      }
    }

    // Add new registers or mask bits.
    for (const auto &MO : MI.defs()) {
      if (!MO.isReg())
        continue;
      unsigned Reg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;
      unsigned SubRegIdx = MO.getSubReg();
      // A subregister def makes only its lanes live; a full def makes all
      // of the register's lanes live.
      LaneBitmask LaneMask = SubRegIdx != 0
                             ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                             : MRI.getMaxLaneMaskForVReg(Reg);
      LaneBitmask &LM = LiveRegs[Reg];
      setMask(MRI, SRI, Reg, LM, LM | LaneMask, SGPRs, VGPRs);
    }
    // Record the high-water marks after accounting for this instruction.
    MaxSGPRs = std::max(MaxSGPRs, SGPRs);
    MaxVGPRs = std::max(MaxVGPRs, VGPRs);
  }

  DEBUG(dbgs() << "Real region's register pressure:\nSGPR = " << MaxSGPRs
               << "\nVGPR = " << MaxVGPRs << '\n');

  return std::make_pair(MaxSGPRs, MaxVGPRs);
}
486
487void GCNScheduleDAGMILive::finalizeSchedule() {
488 LiveIns.shrink_and_clear();
489}