blob: 8dfd89aa759d33709dd8976e654de36828198277 [file] [log] [blame]
Tom Stellard75aadc22012-12-11 21:25:42 +00001//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// \brief Implements the AMDGPU specific subclass of TargetSubtarget.
12//
13//===----------------------------------------------------------------------===//
14
15#include "AMDGPUSubtarget.h"
Konstantin Zhuravlyove03b1d72017-02-08 13:02:33 +000016#include "SIMachineFunctionInfo.h"
Matt Arsenaultd9a23ab2014-07-13 02:08:26 +000017#include "llvm/ADT/SmallString.h"
Tom Stellard83f0bce2015-01-29 16:55:25 +000018#include "llvm/CodeGen/MachineScheduler.h"
Eugene Zelenko6a9226d2016-12-12 22:23:53 +000019#include "llvm/Target/TargetFrameLowering.h"
20#include <algorithm>
Matt Arsenaultd9a23ab2014-07-13 02:08:26 +000021
using namespace llvm;

#define DEBUG_TYPE "amdgpu-subtarget"

// Pull in the TableGen-generated subtarget definitions: the feature enum,
// the processor/feature description tables, and the generated portion of the
// AMDGPUGenSubtargetInfo constructor.
#define GET_SUBTARGETINFO_ENUM
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#include "AMDGPUGenSubtargetInfo.inc"
Eugene Zelenko6a9226d2016-12-12 22:23:53 +000031AMDGPUSubtarget::~AMDGPUSubtarget() = default;
Matt Arsenault43e92fe2016-06-24 06:30:11 +000032
/// Parse the subtarget feature string and fix up interdependent fields.
/// Default features are prepended and the user string \p FS appended, so
/// user-specified settings override the defaults.
/// \returns *this, so the constructor can use it to chain-initialize members.
AMDGPUSubtarget &
AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
                                                 StringRef GPU, StringRef FS) {
  // Determine default and user-specified characteristics
  // On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
  // enabled, but some instructions do not respect them and they run at the
  // double precision rate, so don't enable by default.
  //
  // We want to be able to turn these off, but making this a subtarget feature
  // for SI has the unhelpful behavior that it unsets everything else if you
  // disable it.

  SmallString<256> FullFS("+promote-alloca,+fp64-fp16-denormals,+load-store-opt,");
  if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
    FullFS += "+flat-for-global,+unaligned-buffer-access,";

  // User features last so they win over the defaults above.
  FullFS += FS;

  ParseSubtargetFeatures(GPU, FullFS);

  // Unless +-flat-for-global is specified, turn on FlatForGlobal for all OS-es
  // on VI and newer hardware to avoid assertion failures due to missing ADDR64
  // variants of MUBUF instructions.
  // Note: checks the raw user string FS, not FullFS, so only an explicit
  // user request suppresses this.
  if (!hasAddr64() && !FS.contains("flat-for-global")) {
    FlatForGlobal = true;
  }

  // FIXME: I don't think Evergreen has any useful support for
  // denormals, but should be checked. Should we issue a warning somewhere
  // if someone tries to enable these?
  if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    FP64FP16Denormals = false;
    FP32Denormals = false;
  }

  // Set defaults if needed.
  if (MaxPrivateElementSize == 0)
    MaxPrivateElementSize = 4;

  return *this;
}
74
// Base AMDGPU subtarget constructor. Every feature flag starts out
// false/zero here; the real values are derived from the feature string by
// initializeSubtargetDependencies(), invoked at the end of the body.
AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                                 const TargetMachine &TM)
  : AMDGPUGenSubtargetInfo(TT, GPU, FS),
    TargetTriple(TT),
    // amdgcn triples are GCN (SI+); everything else is the R600 family.
    Gen(TT.getArch() == Triple::amdgcn ? SOUTHERN_ISLANDS : R600),
    IsaVersion(ISAVersion0_0_0),
    WavefrontSize(64),
    LocalMemorySize(0),
    LDSBankCount(0),
    MaxPrivateElementSize(0),

    FastFMAF32(false),
    HalfRate64Ops(false),

    FP32Denormals(false),
    FP64FP16Denormals(false),
    FPExceptions(false),
    FlatForGlobal(false),
    UnalignedScratchAccess(false),
    UnalignedBufferAccess(false),

    EnableXNACK(false),
    DebuggerInsertNops(false),
    DebuggerReserveRegs(false),
    DebuggerEmitPrologue(false),

    EnableVGPRSpilling(false),
    EnablePromoteAlloca(false),
    EnableLoadStoreOpt(false),
    EnableUnsafeDSOffsetFolding(false),
    EnableSIScheduler(false),
    DumpCode(false),

    FP64(false),
    IsGCN(false),
    GCN1Encoding(false),
    GCN3Encoding(false),
    CIInsts(false),
    SGPRInitBug(false),
    HasSMemRealTime(false),
    Has16BitInsts(false),
    HasMovrel(false),
    HasVGPRIndexMode(false),
    HasScalarStores(false),
    HasInv2PiInlineImm(false),
    HasSDWA(false),
    HasDPP(false),
    FlatAddressSpace(false),

    R600ALUInst(false),
    CaymanISA(false),
    CFALUBug(false),
    HasVertexCache(false),
    TexVTXClauseSize(0),
    ScalarizeGlobal(false),

    FeatureDisable(false),
    InstrItins(getInstrItineraryForCPU(GPU)) {
  initializeSubtargetDependencies(TT, GPU, FS);
}
Tom Stellardb8fd6ef2014-12-02 22:00:07 +0000135
Stanislav Mekhanoshin2b913b12017-02-01 22:59:50 +0000136unsigned AMDGPUSubtarget::getMaxLocalMemSizeWithWaveCount(unsigned NWaves,
137 const Function &F) const {
138 if (NWaves == 1)
Matt Arsenault8a028bf2016-05-16 21:19:59 +0000139 return getLocalMemorySize();
Stanislav Mekhanoshin2b913b12017-02-01 22:59:50 +0000140 unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
141 unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
142 unsigned MaxWaves = getMaxWavesPerEU();
143 return getLocalMemorySize() * MaxWaves / WorkGroupsPerCu / NWaves;
Matt Arsenault8a028bf2016-05-16 21:19:59 +0000144}
145
Stanislav Mekhanoshin2b913b12017-02-01 22:59:50 +0000146unsigned AMDGPUSubtarget::getOccupancyWithLocalMemSize(uint32_t Bytes,
147 const Function &F) const {
148 unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
149 unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
150 unsigned MaxWaves = getMaxWavesPerEU();
151 unsigned Limit = getLocalMemorySize() * MaxWaves / WorkGroupsPerCu;
152 unsigned NumWaves = Limit / (Bytes ? Bytes : 1u);
153 NumWaves = std::min(NumWaves, MaxWaves);
154 NumWaves = std::max(NumWaves, 1u);
155 return NumWaves;
Matt Arsenault8a028bf2016-05-16 21:19:59 +0000156}
157
Konstantin Zhuravlyov1d650262016-09-06 20:22:28 +0000158std::pair<unsigned, unsigned> AMDGPUSubtarget::getFlatWorkGroupSizes(
159 const Function &F) const {
Konstantin Zhuravlyov1d650262016-09-06 20:22:28 +0000160 // Default minimum/maximum flat work group sizes.
161 std::pair<unsigned, unsigned> Default =
162 AMDGPU::isCompute(F.getCallingConv()) ?
163 std::pair<unsigned, unsigned>(getWavefrontSize() * 2,
164 getWavefrontSize() * 4) :
165 std::pair<unsigned, unsigned>(1, getWavefrontSize());
166
167 // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
168 // starts using "amdgpu-flat-work-group-size" attribute.
169 Default.second = AMDGPU::getIntegerAttribute(
170 F, "amdgpu-max-work-group-size", Default.second);
171 Default.first = std::min(Default.first, Default.second);
172
173 // Requested minimum/maximum flat work group sizes.
174 std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
175 F, "amdgpu-flat-work-group-size", Default);
176
177 // Make sure requested minimum is less than requested maximum.
178 if (Requested.first > Requested.second)
179 return Default;
180
181 // Make sure requested values do not violate subtarget's specifications.
182 if (Requested.first < getMinFlatWorkGroupSize())
183 return Default;
184 if (Requested.second > getMaxFlatWorkGroupSize())
185 return Default;
186
187 return Requested;
188}
189
/// Compute the minimum/maximum number of waves per execution unit for \p F
/// from the "amdgpu-waves-per-eu" attribute, cross-checked against the flat
/// work group size attributes. A maximum of 0 means "no upper limit".
/// Any inconsistent request falls back to the defaults.
std::pair<unsigned, unsigned> AMDGPUSubtarget::getWavesPerEU(
  const Function &F) const {
  // Default minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Default(1, 0);

  // Default/requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> FlatWorkGroupSizes = getFlatWorkGroupSizes(F);

  // If minimum/maximum flat work group sizes were explicitly requested using
  // "amdgpu-flat-work-group-size" attribute, then set default minimum/maximum
  // number of waves per execution unit to values implied by requested
  // minimum/maximum flat work group sizes.
  unsigned MinImpliedByFlatWorkGroupSize =
    getMaxWavesPerEU(FlatWorkGroupSizes.second);
  bool RequestedFlatWorkGroupSize = false;

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  if (F.hasFnAttribute("amdgpu-max-work-group-size") ||
      F.hasFnAttribute("amdgpu-flat-work-group-size")) {
    Default.first = MinImpliedByFlatWorkGroupSize;
    RequestedFlatWorkGroupSize = true;
  }

  // Requested minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-waves-per-eu", Default, true);

  // Make sure requested minimum is less than requested maximum.
  // (A requested maximum of 0 means "unlimited" and is not checked.)
  if (Requested.second && Requested.first > Requested.second)
    return Default;

  // Make sure requested values do not violate subtarget's specifications.
  if (Requested.first < getMinWavesPerEU() ||
      Requested.first > getMaxWavesPerEU())
    return Default;
  if (Requested.second > getMaxWavesPerEU())
    return Default;

  // Make sure requested values are compatible with values implied by requested
  // minimum/maximum flat work group sizes.
  if (RequestedFlatWorkGroupSize &&
      Requested.first > MinImpliedByFlatWorkGroupSize)
    return Default;

  return Requested;
}
237
// R600-family subtarget: wires up the R600 instruction info, frame lowering
// (stack grows up, zero local-area offset), and ISel lowering.
R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
                             const TargetMachine &TM) :
  AMDGPUSubtarget(TT, GPU, FS, TM),
  InstrInfo(*this),
  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
  TLInfo(TM, *this) {}
244
// GCN (SI+) subtarget: wires up the SI instruction info, frame lowering
// (stack grows up, zero local-area offset), and ISel lowering.
SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                         const TargetMachine &TM) :
  AMDGPUSubtarget(TT, GPU, FS, TM),
  InstrInfo(*this),
  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
  TLInfo(TM, *this) {}
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000251
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000252void SISubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
Matt Arsenault55dff272016-06-28 00:11:26 +0000253 unsigned NumRegionInstrs) const {
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000254 // Track register pressure so the scheduler can try to decrease
255 // pressure once register usage is above the threshold defined by
256 // SIRegisterInfo::getRegPressureSetLimit()
257 Policy.ShouldTrackPressure = true;
Tom Stellard83f0bce2015-01-29 16:55:25 +0000258
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000259 // Enabling both top down and bottom up scheduling seems to give us less
260 // register spills than just using one of these approaches on its own.
261 Policy.OnlyTopDown = false;
262 Policy.OnlyBottomUp = false;
Tom Stellard83f0bce2015-01-29 16:55:25 +0000263
Alexander Timofeeva3dace32017-02-07 17:57:48 +0000264 Policy.ShouldTrackLaneMasks = enableSubRegLiveness();
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000265}
Tom Stellard0bc954e2016-03-30 16:35:09 +0000266
Matt Arsenault43e92fe2016-06-24 06:30:11 +0000267bool SISubtarget::isVGPRSpillingEnabled(const Function& F) const {
268 return EnableVGPRSpilling || !AMDGPU::isShader(F.getCallingConv());
269}
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000270
Tom Stellard2f3f9852017-01-25 01:25:13 +0000271unsigned SISubtarget::getKernArgSegmentSize(const MachineFunction &MF,
Konstantin Zhuravlyov27d64c32017-02-08 13:29:23 +0000272 unsigned ExplicitArgBytes) const {
Tom Stellard2f3f9852017-01-25 01:25:13 +0000273 unsigned ImplicitBytes = getImplicitArgNumBytes(MF);
Tom Stellarde88bbc32016-09-23 01:33:26 +0000274 if (ImplicitBytes == 0)
275 return ExplicitArgBytes;
276
277 unsigned Alignment = getAlignmentForImplicitArgPtr();
278 return alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
279}
280
Tom Stellard0d23ebe2016-08-29 19:42:52 +0000281unsigned SISubtarget::getOccupancyWithNumSGPRs(unsigned SGPRs) const {
282 if (getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
283 if (SGPRs <= 80)
284 return 10;
285 if (SGPRs <= 88)
286 return 9;
287 if (SGPRs <= 100)
288 return 8;
289 return 7;
290 }
291 if (SGPRs <= 48)
292 return 10;
293 if (SGPRs <= 56)
294 return 9;
295 if (SGPRs <= 64)
296 return 8;
297 if (SGPRs <= 72)
298 return 7;
299 if (SGPRs <= 80)
300 return 6;
301 return 5;
302}
303
304unsigned SISubtarget::getOccupancyWithNumVGPRs(unsigned VGPRs) const {
305 if (VGPRs <= 24)
306 return 10;
307 if (VGPRs <= 28)
308 return 9;
309 if (VGPRs <= 32)
310 return 8;
311 if (VGPRs <= 36)
312 return 7;
313 if (VGPRs <= 40)
314 return 6;
315 if (VGPRs <= 48)
316 return 5;
317 if (VGPRs <= 64)
318 return 4;
319 if (VGPRs <= 84)
320 return 3;
321 if (VGPRs <= 128)
322 return 2;
323 return 1;
324}
Matt Arsenault4eae3012016-10-28 20:31:47 +0000325
Konstantin Zhuravlyove03b1d72017-02-08 13:02:33 +0000326unsigned SISubtarget::getReservedNumSGPRs(const MachineFunction &MF) const {
327 const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
328 if (MFI.hasFlatScratchInit()) {
329 if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
330 return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
331 if (getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)
332 return 4; // FLAT_SCRATCH, VCC (in that order).
333 }
334
335 if (isXNACKEnabled())
336 return 4; // XNACK, VCC (in that order).
337 return 2; // VCC.
338}
339
/// Compute the maximum number of SGPRs available to \p MF after accounting
/// for the "amdgpu-num-sgpr" attribute, waves-per-EU constraints, preloaded
/// input SGPRs, the SGPR-init hardware bug, and reserved special registers.
unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
  const Function &F = *MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute maximum number of SGPRs function can use using default/requested
  // minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, false);
  unsigned MaxAddressableNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, true);

  // Check if maximum number of SGPRs was explicitly requested using
  // "amdgpu-num-sgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-sgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-sgpr", MaxNumSGPRs);

    // Make sure requested value does not violate subtarget's specifications.
    // (Requested == 0 below means "ignore the attribute".)
    if (Requested && (Requested <= getReservedNumSGPRs(MF)))
      Requested = 0;

    // If more SGPRs are required to support the input user/system SGPRs,
    // increase to accommodate them.
    //
    // FIXME: This really ends up using the requested number of SGPRs + number
    // of reserved special registers in total. Theoretically you could re-use
    // the last input registers for these special registers, but this would
    // require a lot of complexity to deal with the weird aliasing.
    unsigned InputNumSGPRs = MFI.getNumPreloadedSGPRs();
    if (Requested && Requested < InputNumSGPRs)
      Requested = InputNumSGPRs;

    // Make sure requested value is compatible with values implied by
    // default/requested minimum/maximum number of waves per execution unit.
    if (Requested && Requested > getMaxNumSGPRs(WavesPerEU.first, false))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumSGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumSGPRs = Requested;
  }

  // Hardware bug workaround: affected chips must use a fixed SGPR count.
  if (hasSGPRInitBug())
    MaxNumSGPRs = AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  return std::min(MaxNumSGPRs - getReservedNumSGPRs(MF),
                  MaxAddressableNumSGPRs);
}
Matt Arsenault4eae3012016-10-28 20:31:47 +0000389
/// Compute the maximum number of VGPRs available to \p MF after accounting
/// for the "amdgpu-num-vgpr" attribute, waves-per-EU constraints, and
/// reserved VGPRs.
unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
  const Function &F = *MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute maximum number of VGPRs function can use using default/requested
  // minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumVGPRs = getMaxNumVGPRs(WavesPerEU.first);

  // Check if maximum number of VGPRs was explicitly requested using
  // "amdgpu-num-vgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-vgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-vgpr", MaxNumVGPRs);

    // Make sure requested value does not violate subtarget's specifications.
    // (Requested == 0 below means "ignore the attribute".)
    if (Requested && Requested <= getReservedNumVGPRs(MF))
      Requested = 0;

    // Make sure requested value is compatible with values implied by
    // default/requested minimum/maximum number of waves per execution unit.
    if (Requested && Requested > getMaxNumVGPRs(WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumVGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumVGPRs = Requested;
  }

  return MaxNumVGPRs - getReservedNumVGPRs(MF);
}