//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Implements the AMDGPU specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUSubtarget.h"
#include "AMDGPU.h"
#include "AMDGPUTargetMachine.h"
#include "AMDGPUCallLowering.h"
#include "AMDGPUInstructionSelector.h"
#include "AMDGPULegalizerInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "SIMachineFunctionInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-subtarget"

#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#define AMDGPUSubtarget GCNSubtarget
#include "AMDGPUGenSubtargetInfo.inc"
#define GET_SUBTARGETINFO_TARGET_DESC
#define GET_SUBTARGETINFO_CTOR
#undef AMDGPUSubtarget
#include "R600GenSubtargetInfo.inc"

GCNSubtarget::~GCNSubtarget() = default;

R600Subtarget &
R600Subtarget::initializeSubtargetDependencies(const Triple &TT,
                                               StringRef GPU, StringRef FS) {
  SmallString<256> FullFS("+promote-alloca,+dx10-clamp,");
  FullFS += FS;
  ParseSubtargetFeatures(GPU, FullFS);

  // FIXME: I don't think Evergreen has any useful support for
  // denormals, but this should be checked. Should we issue a warning somewhere
  // if someone tries to enable these?
  if (getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS) {
    FP32Denormals = false;
  }

  HasMulU24 = getGeneration() >= EVERGREEN;
  HasMulI24 = hasCaymanISA();

  return *this;
}

GCNSubtarget &
GCNSubtarget::initializeSubtargetDependencies(const Triple &TT,
                                              StringRef GPU, StringRef FS) {
  // Determine default and user-specified characteristics
  // On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
  // enabled, but some instructions do not respect them and they run at the
  // double precision rate, so don't enable by default.
  //
  // We want to be able to turn these off, but making this a subtarget feature
  // for SI has the unhelpful behavior that it unsets everything else if you
  // disable it.
  //
  // Similarly we want enable-prt-strict-null to be on by default and not to
  // unset everything else if it is disabled

  SmallString<256> FullFS("+promote-alloca,+dx10-clamp,+load-store-opt,");

  if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
    FullFS += "+flat-address-space,+flat-for-global,+unaligned-buffer-access,+trap-handler,";

  // FIXME: I don't think Evergreen has any useful support for
  // denormals, but this should be checked. Should we issue a warning somewhere
  // if someone tries to enable these?
  if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    FullFS += "+fp64-fp16-denormals,";
  } else {
    FullFS += "-fp32-denormals,";
  }

  FullFS += "+enable-prt-strict-null,"; // This is overridden by a disable in FS

  FullFS += FS;

  ParseSubtargetFeatures(GPU, FullFS);

  // We don't support FP64 for EG/NI atm.
  assert(!hasFP64() || (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS));

  // Unless +-flat-for-global is specified, turn on FlatForGlobal for all OS-es
  // on VI and newer hardware to avoid assertion failures due to missing ADDR64
  // variants of MUBUF instructions.
  if (!hasAddr64() && !FS.contains("flat-for-global")) {
    FlatForGlobal = true;
  }

  // Set defaults if needed.
  if (MaxPrivateElementSize == 0)
    MaxPrivateElementSize = 4;

  if (LDSBankCount == 0)
    LDSBankCount = 32;

  if (TT.getArch() == Triple::amdgcn) {
    if (LocalMemorySize == 0)
      LocalMemorySize = 32768;

    // Do something sensible for unspecified target.
    if (!HasMovrel && !HasVGPRIndexMode)
      HasMovrel = true;
  }

  HasFminFmaxLegacy = getGeneration() < AMDGPUSubtarget::VOLCANIC_ISLANDS;

  return *this;
}

AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT) :
  TargetTriple(TT),
  Has16BitInsts(false),
  HasMadMixInsts(false),
  FP32Denormals(false),
  FPExceptions(false),
  HasSDWA(false),
  HasVOP3PInsts(false),
  HasMulI24(true),
  HasMulU24(true),
  HasInv2PiInlineImm(false),
  HasFminFmaxLegacy(true),
  EnablePromoteAlloca(false),
  HasTrigReducedRange(false),
  LocalMemorySize(0),
  WavefrontSize(0)
  { }

GCNSubtarget::GCNSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                           const GCNTargetMachine &TM) :
    AMDGPUGenSubtargetInfo(TT, GPU, FS),
    AMDGPUSubtarget(TT),
    TargetTriple(TT),
    Gen(SOUTHERN_ISLANDS),
    IsaVersion(ISAVersion0_0_0),
    InstrItins(getInstrItineraryForCPU(GPU)),
    LDSBankCount(0),
    MaxPrivateElementSize(0),

    FastFMAF32(false),
    HalfRate64Ops(false),

    FP64FP16Denormals(false),
    DX10Clamp(false),
    FlatForGlobal(false),
    AutoWaitcntBeforeBarrier(false),
    CodeObjectV3(false),
    UnalignedScratchAccess(false),
    UnalignedBufferAccess(false),

    HasApertureRegs(false),
    EnableXNACK(false),
    TrapHandler(false),
    DebuggerInsertNops(false),
    DebuggerEmitPrologue(false),

    EnableHugePrivateBuffer(false),
    EnableLoadStoreOpt(false),
    EnableUnsafeDSOffsetFolding(false),
    EnableSIScheduler(false),
    EnableDS128(false),
    EnablePRTStrictNull(false),
    DumpCode(false),

    FP64(false),
    GCN3Encoding(false),
    CIInsts(false),
    VIInsts(false),
    GFX9Insts(false),
    SGPRInitBug(false),
    HasSMemRealTime(false),
    HasIntClamp(false),
    HasFmaMixInsts(false),
    HasMovrel(false),
    HasVGPRIndexMode(false),
    HasScalarStores(false),
    HasScalarAtomics(false),
    HasSDWAOmod(false),
    HasSDWAScalar(false),
    HasSDWASdst(false),
    HasSDWAMac(false),
    HasSDWAOutModsVOPC(false),
    HasDPP(false),
    HasR128A16(false),
    HasDLInsts(false),
    HasDotInsts(false),
    EnableSRAMECC(false),
    FlatAddressSpace(false),
    FlatInstOffsets(false),
    FlatGlobalInsts(false),
    FlatScratchInsts(false),
    AddNoCarryInsts(false),
    HasUnpackedD16VMem(false),

    ScalarizeGlobal(false),

    FeatureDisable(false),
    InstrInfo(initializeSubtargetDependencies(TT, GPU, FS)),
    TLInfo(TM, *this),
    FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0) {
  CallLoweringInfo.reset(new AMDGPUCallLowering(*getTargetLowering()));
  Legalizer.reset(new AMDGPULegalizerInfo(*this, TM));
  RegBankInfo.reset(new AMDGPURegisterBankInfo(*getRegisterInfo()));
  InstSelector.reset(new AMDGPUInstructionSelector(
  *this, *static_cast<AMDGPURegisterBankInfo *>(RegBankInfo.get()), TM));
}

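// Upper bound on the LDS a single workgroup can use if NWaves waves are to
// remain resident: with one wave the whole LDS is available, otherwise the
// total LDS is scaled by the wave and workgroup limits derived from F's flat
// work group size.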
unsigned AMDGPUSubtarget::getMaxLocalMemSizeWithWaveCount(unsigned NWaves,
  const Function &F) const {
  if (NWaves == 1)
    return getLocalMemorySize();
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  unsigned MaxWaves = getMaxWavesPerEU();
  return getLocalMemorySize() * MaxWaves / WorkGroupsPerCu / NWaves;
}

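// Estimate how many waves can be resident when each workgroup of F uses
// Bytes bytes of LDS; the result is clamped to [1, getMaxWavesPerEU()].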
unsigned AMDGPUSubtarget::getOccupancyWithLocalMemSize(uint32_t Bytes,
  const Function &F) const {
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  unsigned MaxWaves = getMaxWavesPerEU();
  unsigned Limit = getLocalMemorySize() * MaxWaves / WorkGroupsPerCu;
  unsigned NumWaves = Limit / (Bytes ? Bytes : 1u);
  NumWaves = std::min(NumWaves, MaxWaves);
  NumWaves = std::max(NumWaves, 1u);
  return NumWaves;
}

unsigned
AMDGPUSubtarget::getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
}

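// Default flat work group size range by calling convention: compute-like
// entry points default to 2-4 wavefronts, graphics shader stages to a single
// wavefront, and everything else to at most 16 wavefronts.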
std::pair<unsigned, unsigned>
AMDGPUSubtarget::getDefaultFlatWorkGroupSize(CallingConv::ID CC) const {
  switch (CC) {
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return std::make_pair(getWavefrontSize() * 2, getWavefrontSize() * 4);
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
    return std::make_pair(1, getWavefrontSize());
  default:
    return std::make_pair(1, 16 * getWavefrontSize());
  }
}

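// Resolve the flat work group size range for F: start from the calling
// convention default, apply the "amdgpu-max-work-group-size" and
// "amdgpu-flat-work-group-size" attributes, and fall back to the default if
// the request is inconsistent or outside the subtarget's limits.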
std::pair<unsigned, unsigned> AMDGPUSubtarget::getFlatWorkGroupSizes(
  const Function &F) const {
  // FIXME: 1024 if function.
  // Default minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Default =
    getDefaultFlatWorkGroupSize(F.getCallingConv());

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  Default.second = AMDGPU::getIntegerAttribute(
    F, "amdgpu-max-work-group-size", Default.second);
  Default.first = std::min(Default.first, Default.second);

  // Requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-flat-work-group-size", Default);

  // Make sure requested minimum is less than requested maximum.
  if (Requested.first > Requested.second)
    return Default;

  // Make sure requested values do not violate subtarget's specifications.
  if (Requested.first < getMinFlatWorkGroupSize())
    return Default;
  if (Requested.second > getMaxFlatWorkGroupSize())
    return Default;

  return Requested;
}

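// Resolve the waves-per-EU range for F: defaults come from the subtarget
// (tightened when a flat work group size was explicitly requested), and the
// "amdgpu-waves-per-eu" attribute is honored only when it stays within the
// subtarget's limits and is compatible with the flat work group size.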
std::pair<unsigned, unsigned> AMDGPUSubtarget::getWavesPerEU(
  const Function &F) const {
  // Default minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Default(1, getMaxWavesPerEU());

  // Default/requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> FlatWorkGroupSizes = getFlatWorkGroupSizes(F);

  // If minimum/maximum flat work group sizes were explicitly requested using
  // "amdgpu-flat-work-group-size" attribute, then set default minimum/maximum
  // number of waves per execution unit to values implied by requested
  // minimum/maximum flat work group sizes.
  unsigned MinImpliedByFlatWorkGroupSize =
    getMaxWavesPerEU(FlatWorkGroupSizes.second);
  bool RequestedFlatWorkGroupSize = false;

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  if (F.hasFnAttribute("amdgpu-max-work-group-size") ||
      F.hasFnAttribute("amdgpu-flat-work-group-size")) {
    Default.first = MinImpliedByFlatWorkGroupSize;
    RequestedFlatWorkGroupSize = true;
  }

  // Requested minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-waves-per-eu", Default, true);

  // Make sure requested minimum is less than requested maximum.
  if (Requested.second && Requested.first > Requested.second)
    return Default;

  // Make sure requested values do not violate subtarget's specifications.
  if (Requested.first < getMinWavesPerEU() ||
      Requested.first > getMaxWavesPerEU())
    return Default;
  if (Requested.second > getMaxWavesPerEU())
    return Default;

  // Make sure requested values are compatible with values implied by requested
  // minimum/maximum flat work group sizes.
  if (RequestedFlatWorkGroupSize &&
      Requested.first < MinImpliedByFlatWorkGroupSize)
    return Default;

  return Requested;
}

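// Attach !range metadata to a workitem-id / local-size query so later
// optimizations know the result is bounded by the kernel's work group size
// (narrowed further by reqd_work_group_size when present). Returns false if
// no upper bound is known.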
bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
  Function *Kernel = I->getParent()->getParent();
  unsigned MinSize = 0;
  unsigned MaxSize = getFlatWorkGroupSizes(*Kernel).second;
  bool IdQuery = false;

  // If reqd_work_group_size is present it narrows the value down.
  if (auto *CI = dyn_cast<CallInst>(I)) {
    const Function *F = CI->getCalledFunction();
    if (F) {
      unsigned Dim = UINT_MAX;
      switch (F->getIntrinsicID()) {
      case Intrinsic::amdgcn_workitem_id_x:
      case Intrinsic::r600_read_tidig_x:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_x:
        Dim = 0;
        break;
      case Intrinsic::amdgcn_workitem_id_y:
      case Intrinsic::r600_read_tidig_y:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_y:
        Dim = 1;
        break;
      case Intrinsic::amdgcn_workitem_id_z:
      case Intrinsic::r600_read_tidig_z:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_z:
        Dim = 2;
        break;
      default:
        break;
      }
      if (Dim <= 3) {
        if (auto Node = Kernel->getMetadata("reqd_work_group_size"))
          if (Node->getNumOperands() == 3)
            MinSize = MaxSize = mdconst::extract<ConstantInt>(
                                  Node->getOperand(Dim))->getZExtValue();
      }
    }
  }

  if (!MaxSize)
    return false;

  // Range metadata is [Lo, Hi). For ID query we need to pass max size
  // as Hi. For size query we need to pass Hi + 1.
  if (IdQuery)
    MinSize = 0;
  else
    ++MaxSize;

  MDBuilder MDB(I->getContext());
  MDNode *MaxWorkGroupSizeRange = MDB.createRange(APInt(32, MinSize),
                                                  APInt(32, MaxSize));
  I->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
  return true;
}

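// Total size in bytes of the explicit kernel arguments, laid out using the
// module data layout's ABI alignment for each argument type; MaxAlign is
// updated to the largest alignment seen.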
uint64_t AMDGPUSubtarget::getExplicitKernArgSize(const Function &F,
                                                 unsigned &MaxAlign) const {
  assert(F.getCallingConv() == CallingConv::AMDGPU_KERNEL ||
         F.getCallingConv() == CallingConv::SPIR_KERNEL);

  const DataLayout &DL = F.getParent()->getDataLayout();
  uint64_t ExplicitArgBytes = 0;
  MaxAlign = 1;

  for (const Argument &Arg : F.args()) {
    Type *ArgTy = Arg.getType();

    unsigned Align = DL.getABITypeAlignment(ArgTy);
    uint64_t AllocSize = DL.getTypeAllocSize(ArgTy);
    ExplicitArgBytes = alignTo(ExplicitArgBytes, Align) + AllocSize;
    MaxAlign = std::max(MaxAlign, Align);
  }

  return ExplicitArgBytes;
}

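// Size of the kernarg segment: the explicit arguments plus any target
// specific offset and implicit arguments, rounded up to 4 bytes so scalar
// loads may safely dereference slightly past the end.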
unsigned AMDGPUSubtarget::getKernArgSegmentSize(const Function &F,
                                                unsigned &MaxAlign) const {
  uint64_t ExplicitArgBytes = getExplicitKernArgSize(F, MaxAlign);

  unsigned ExplicitOffset = getExplicitKernelArgOffset(F);

  uint64_t TotalSize = ExplicitOffset + ExplicitArgBytes;
  unsigned ImplicitBytes = getImplicitArgNumBytes(F);
  if (ImplicitBytes != 0) {
    unsigned Alignment = getAlignmentForImplicitArgPtr();
    TotalSize = alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
  }

  // Being able to dereference past the end is useful for emitting scalar loads.
  return alignTo(TotalSize, 4);
}

R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
                             const TargetMachine &TM) :
  R600GenSubtargetInfo(TT, GPU, FS),
  AMDGPUSubtarget(TT),
  InstrInfo(*this),
  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
  FMA(false),
  CaymanISA(false),
  CFALUBug(false),
  DX10Clamp(false),
  HasVertexCache(false),
  R600ALUInst(false),
  FP64(false),
  TexVTXClauseSize(0),
  Gen(R600),
  TLInfo(TM, initializeSubtargetDependencies(TT, GPU, FS)),
  InstrItins(getInstrItineraryForCPU(GPU)) { }

void GCNSubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                       unsigned NumRegionInstrs) const {
  // Track register pressure so the scheduler can try to decrease
  // pressure once register usage is above the threshold defined by
  // SIRegisterInfo::getRegPressureSetLimit()
  Policy.ShouldTrackPressure = true;

  // Enabling both top down and bottom up scheduling seems to give us less
  // register spills than just using one of these approaches on its own.
  Policy.OnlyTopDown = false;
  Policy.OnlyBottomUp = false;

  // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
  if (!enableSIScheduler())
    Policy.ShouldTrackLaneMasks = true;
}

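// Occupancy (waves per execution unit) achievable for a given SGPR count;
// the breakpoints differ between VI+ and earlier generations.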
unsigned GCNSubtarget::getOccupancyWithNumSGPRs(unsigned SGPRs) const {
  if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS) {
    if (SGPRs <= 80)
      return 10;
    if (SGPRs <= 88)
      return 9;
    if (SGPRs <= 100)
      return 8;
    return 7;
  }
  if (SGPRs <= 48)
    return 10;
  if (SGPRs <= 56)
    return 9;
  if (SGPRs <= 64)
    return 8;
  if (SGPRs <= 72)
    return 7;
  if (SGPRs <= 80)
    return 6;
  return 5;
}

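// Occupancy achievable for a given VGPR count.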
unsigned GCNSubtarget::getOccupancyWithNumVGPRs(unsigned VGPRs) const {
  if (VGPRs <= 24)
    return 10;
  if (VGPRs <= 28)
    return 9;
  if (VGPRs <= 32)
    return 8;
  if (VGPRs <= 36)
    return 7;
  if (VGPRs <= 40)
    return 6;
  if (VGPRs <= 48)
    return 5;
  if (VGPRs <= 64)
    return 4;
  if (VGPRs <= 84)
    return 3;
  if (VGPRs <= 128)
    return 2;
  return 1;
}

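// Number of SGPRs reserved for VCC, plus XNACK and FLAT_SCRATCH when they
// are in use by this function.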
unsigned GCNSubtarget::getReservedNumSGPRs(const MachineFunction &MF) const {
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  if (MFI.hasFlatScratchInit()) {
    if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
    if (getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)
      return 4; // FLAT_SCRATCH, VCC (in that order).
  }

  if (isXNACKEnabled())
    return 4; // XNACK, VCC (in that order).
  return 2; // VCC.
}

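// Maximum number of SGPRs the function may use, taking into account the
// waves-per-EU target, the "amdgpu-num-sgpr" attribute, preloaded input
// SGPRs, the SGPR init bug workaround, and the reserved special registers.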
unsigned GCNSubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute maximum number of SGPRs function can use using default/requested
  // minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, false);
  unsigned MaxAddressableNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, true);

  // Check if maximum number of SGPRs was explicitly requested using
  // "amdgpu-num-sgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-sgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-sgpr", MaxNumSGPRs);

    // Make sure requested value does not violate subtarget's specifications.
    if (Requested && (Requested <= getReservedNumSGPRs(MF)))
      Requested = 0;

    // If more SGPRs are required to support the input user/system SGPRs,
    // increase to accommodate them.
    //
    // FIXME: This really ends up using the requested number of SGPRs + number
    // of reserved special registers in total. Theoretically you could re-use
    // the last input registers for these special registers, but this would
    // require a lot of complexity to deal with the weird aliasing.
    unsigned InputNumSGPRs = MFI.getNumPreloadedSGPRs();
    if (Requested && Requested < InputNumSGPRs)
      Requested = InputNumSGPRs;

    // Make sure requested value is compatible with values implied by
    // default/requested minimum/maximum number of waves per execution unit.
    if (Requested && Requested > getMaxNumSGPRs(WavesPerEU.first, false))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumSGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumSGPRs = Requested;
  }

  if (hasSGPRInitBug())
    MaxNumSGPRs = AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  return std::min(MaxNumSGPRs - getReservedNumSGPRs(MF),
                  MaxAddressableNumSGPRs);
}

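// Maximum number of VGPRs the function may use, derived from the
// waves-per-EU target and the optional "amdgpu-num-vgpr" attribute.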
unsigned GCNSubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute maximum number of VGPRs function can use using default/requested
  // minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumVGPRs = getMaxNumVGPRs(WavesPerEU.first);

  // Check if maximum number of VGPRs was explicitly requested using
  // "amdgpu-num-vgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-vgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-vgpr", MaxNumVGPRs);

    // Make sure requested value is compatible with values implied by
    // default/requested minimum/maximum number of waves per execution unit.
    if (Requested && Requested > getMaxNumVGPRs(WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumVGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumVGPRs = Requested;
  }

  return MaxNumVGPRs;
}

namespace {
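// Post-RA DAG mutation that keeps consecutive memory operations of the same
// kind (VMEM, FLAT, SMRD or DS) adjacent by adding artificial edges, so the
// scheduler does not pull them apart.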
struct MemOpClusterMutation : ScheduleDAGMutation {
  const SIInstrInfo *TII;

  MemOpClusterMutation(const SIInstrInfo *tii) : TII(tii) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override {
    ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

    SUnit *SUa = nullptr;
    // Search for two consecutive memory operations and link them
    // to prevent the scheduler from moving them apart.
    // In the DAG pre-process, SUnits are in the original order of
    // the instructions before scheduling.
    for (SUnit &SU : DAG->SUnits) {
      MachineInstr &MI2 = *SU.getInstr();
      if (!MI2.mayLoad() && !MI2.mayStore()) {
        SUa = nullptr;
        continue;
      }
      if (!SUa) {
        SUa = &SU;
        continue;
      }

      MachineInstr &MI1 = *SUa->getInstr();
      if ((TII->isVMEM(MI1) && TII->isVMEM(MI2)) ||
          (TII->isFLAT(MI1) && TII->isFLAT(MI2)) ||
          (TII->isSMRD(MI1) && TII->isSMRD(MI2)) ||
          (TII->isDS(MI1) && TII->isDS(MI2))) {
        SU.addPredBarrier(SUa);

        for (const SDep &SI : SU.Preds) {
          if (SI.getSUnit() != SUa)
            SUa->addPred(SDep(SI.getSUnit(), SDep::Artificial));
        }

        if (&SU != &DAG->ExitSU) {
          for (const SDep &SI : SUa->Succs) {
            if (SI.getSUnit() != &SU)
              SI.getSUnit()->addPred(SDep(&SU, SDep::Artificial));
          }
        }
      }

      SUa = &SU;
    }
  }
};
} // namespace

void GCNSubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(llvm::make_unique<MemOpClusterMutation>(&InstrInfo));
}

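// Helpers to recover the common AMDGPUSubtarget interface from either the
// GCN or the R600 subtarget, selected by the target triple.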
const AMDGPUSubtarget &AMDGPUSubtarget::get(const MachineFunction &MF) {
  if (MF.getTarget().getTargetTriple().getArch() == Triple::amdgcn)
    return static_cast<const AMDGPUSubtarget&>(MF.getSubtarget<GCNSubtarget>());
  else
    return static_cast<const AMDGPUSubtarget&>(MF.getSubtarget<R600Subtarget>());
}

const AMDGPUSubtarget &AMDGPUSubtarget::get(const TargetMachine &TM, const Function &F) {
  if (TM.getTargetTriple().getArch() == Triple::amdgcn)
    return static_cast<const AMDGPUSubtarget&>(TM.getSubtarget<GCNSubtarget>(F));
  else
    return static_cast<const AMDGPUSubtarget&>(TM.getSubtarget<R600Subtarget>(F));
}