//===-- AMDGPUSubtarget.cpp - AMDGPU Subtarget Information ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Implements the AMDGPU specific subclass of TargetSubtarget.
//
//===----------------------------------------------------------------------===//
14
15#include "AMDGPUSubtarget.h"
Quentin Colombetf3f7d4d2017-07-05 18:40:56 +000016#include "AMDGPU.h"
17#include "AMDGPUTargetMachine.h"
Quentin Colombetf3f7d4d2017-07-05 18:40:56 +000018#include "AMDGPUCallLowering.h"
19#include "AMDGPUInstructionSelector.h"
20#include "AMDGPULegalizerInfo.h"
21#include "AMDGPURegisterBankInfo.h"
Konstantin Zhuravlyove03b1d72017-02-08 13:02:33 +000022#include "SIMachineFunctionInfo.h"
Tom Stellard44b30b42018-05-22 02:03:23 +000023#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Matt Arsenaultd9a23ab2014-07-13 02:08:26 +000024#include "llvm/ADT/SmallString.h"
Tom Stellard83f0bce2015-01-29 16:55:25 +000025#include "llvm/CodeGen/MachineScheduler.h"
Stanislav Mekhanoshinc90347d2017-04-12 20:48:56 +000026#include "llvm/IR/MDBuilder.h"
David Blaikie1be62f02017-11-03 22:32:11 +000027#include "llvm/CodeGen/TargetFrameLowering.h"
Eugene Zelenko6a9226d2016-12-12 22:23:53 +000028#include <algorithm>
Matt Arsenaultd9a23ab2014-07-13 02:08:26 +000029
Tom Stellard75aadc22012-12-11 21:25:42 +000030using namespace llvm;
31
Chandler Carruthe96dd892014-04-21 22:55:11 +000032#define DEBUG_TYPE "amdgpu-subtarget"
33
Tom Stellard75aadc22012-12-11 21:25:42 +000034#define GET_SUBTARGETINFO_TARGET_DESC
35#define GET_SUBTARGETINFO_CTOR
36#include "AMDGPUGenSubtargetInfo.inc"
37
Eugene Zelenko6a9226d2016-12-12 22:23:53 +000038AMDGPUSubtarget::~AMDGPUSubtarget() = default;
Matt Arsenault43e92fe2016-06-24 06:30:11 +000039
Eric Christopherac4b69e2014-07-25 22:22:39 +000040AMDGPUSubtarget &
Daniel Sandersa73f1fd2015-06-10 12:11:26 +000041AMDGPUSubtarget::initializeSubtargetDependencies(const Triple &TT,
42 StringRef GPU, StringRef FS) {
Eric Christopherac4b69e2014-07-25 22:22:39 +000043 // Determine default and user-specified characteristics
Matt Arsenaultf171cf22014-07-14 23:40:49 +000044 // On SI+, we want FP64 denormals to be on by default. FP32 denormals can be
45 // enabled, but some instructions do not respect them and they run at the
46 // double precision rate, so don't enable by default.
47 //
48 // We want to be able to turn these off, but making this a subtarget feature
49 // for SI has the unhelpful behavior that it unsets everything else if you
50 // disable it.
Matt Arsenaultd9a23ab2014-07-13 02:08:26 +000051
Jan Veselyd1c9b612017-12-04 22:57:29 +000052 SmallString<256> FullFS("+promote-alloca,+dx10-clamp,+load-store-opt,");
53
Changpeng Fangb41574a2015-12-22 20:55:23 +000054 if (isAmdHsaOS()) // Turn on FlatForGlobal for HSA.
Matt Arsenault8728c5f2017-08-07 14:58:04 +000055 FullFS += "+flat-address-space,+flat-for-global,+unaligned-buffer-access,+trap-handler,";
Matt Arsenaulta6867fd2017-01-23 22:31:03 +000056
  // FIXME: I don't think Evergreen has any useful support for denormals, but
  // this should be checked. Should we issue a warning somewhere if someone
  // tries to enable these?
  if (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    FullFS += "+fp64-fp16-denormals,";
  } else {
    FullFS += "-fp32-denormals,";
  }

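  // Note: ParseSubtargetFeatures applies feature flags in order, with later
  // entries taking precedence, so appending the user feature string last lets
  // it override the defaults above (e.g. a user-supplied "-promote-alloca"
  // wins over the "+promote-alloca" default).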
  FullFS += FS;

  ParseSubtargetFeatures(GPU, FullFS);

  // We don't support FP64 for EG/NI atm.
  assert(!hasFP64() || (getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS));

  // Unless +-flat-for-global is specified, turn on FlatForGlobal for all OSes
  // on VI and newer hardware to avoid assertion failures due to missing ADDR64
  // variants of MUBUF instructions.
  if (!hasAddr64() && !FS.contains("flat-for-global")) {
    FlatForGlobal = true;
  }

  // Set defaults if needed.
  if (MaxPrivateElementSize == 0)
    MaxPrivateElementSize = 4;

  if (LDSBankCount == 0)
    LDSBankCount = 32;

  if (TT.getArch() == Triple::amdgcn) {
    if (LocalMemorySize == 0)
      LocalMemorySize = 32768;

    // Do something sensible for unspecified target.
    if (!HasMovrel && !HasVGPRIndexMode)
      HasMovrel = true;
  }

  return *this;
}

AMDGPUSubtarget::AMDGPUSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                                 const TargetMachine &TM)
  : AMDGPUGenSubtargetInfo(TT, GPU, FS),
    TargetTriple(TT),
    Gen(TT.getArch() == Triple::amdgcn ? SOUTHERN_ISLANDS : R600),
    IsaVersion(ISAVersion0_0_0),
    WavefrontSize(0),
    LocalMemorySize(0),
    LDSBankCount(0),
    MaxPrivateElementSize(0),

    FastFMAF32(false),
    HalfRate64Ops(false),

    FP32Denormals(false),
    FP64FP16Denormals(false),
    FPExceptions(false),
    DX10Clamp(false),
    FlatForGlobal(false),
    AutoWaitcntBeforeBarrier(false),
    CodeObjectV3(false),
    UnalignedScratchAccess(false),
    UnalignedBufferAccess(false),

    HasApertureRegs(false),
    EnableXNACK(false),
    TrapHandler(false),
    DebuggerInsertNops(false),
    DebuggerReserveRegs(false),
    DebuggerEmitPrologue(false),

    EnableHugePrivateBuffer(false),
    EnableVGPRSpilling(false),
    EnablePromoteAlloca(false),
    EnableLoadStoreOpt(false),
    EnableUnsafeDSOffsetFolding(false),
    EnableSIScheduler(false),
    EnableDS128(false),
    DumpCode(false),

    FP64(false),
    FMA(false),
    MIMG_R128(false),
    IsGCN(false),
    GCN3Encoding(false),
    CIInsts(false),
    GFX9Insts(false),
    SGPRInitBug(false),
    HasSMemRealTime(false),
    Has16BitInsts(false),
    HasIntClamp(false),
    HasVOP3PInsts(false),
    HasMadMixInsts(false),
    HasFmaMixInsts(false),
    HasMovrel(false),
    HasVGPRIndexMode(false),
    HasScalarStores(false),
    HasScalarAtomics(false),
    HasInv2PiInlineImm(false),
    HasSDWA(false),
    HasSDWAOmod(false),
    HasSDWAScalar(false),
    HasSDWASdst(false),
    HasSDWAMac(false),
    HasSDWAOutModsVOPC(false),
    HasDPP(false),
    HasDLInsts(false),
    D16PreservesUnusedBits(false),
    FlatAddressSpace(false),
    FlatInstOffsets(false),
    FlatGlobalInsts(false),
    FlatScratchInsts(false),
    AddNoCarryInsts(false),
    HasUnpackedD16VMem(false),

    R600ALUInst(false),
    CaymanISA(false),
    CFALUBug(false),
    HasVertexCache(false),
    TexVTXClauseSize(0),
    ScalarizeGlobal(false),

    FeatureDisable(false),
    InstrItins(getInstrItineraryForCPU(GPU)) {
  AS = AMDGPU::getAMDGPUAS(TT);
  initializeSubtargetDependencies(TT, GPU, FS);
}

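// A sketch of the intent: this returns the largest LDS allocation, in bytes,
// that still allows NWaves waves per execution unit to be resident. With
// assumed example values of LocalMemorySize = 32768, MaxWaves = 10, and 4
// work groups per CU, NWaves = 4 yields 32768 * 10 / 4 / 4 = 20480 bytes.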
unsigned AMDGPUSubtarget::getMaxLocalMemSizeWithWaveCount(unsigned NWaves,
  const Function &F) const {
  if (NWaves == 1)
    return getLocalMemorySize();
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  unsigned MaxWaves = getMaxWavesPerEU();
  return getLocalMemorySize() * MaxWaves / WorkGroupsPerCu / NWaves;
}

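// The inverse query: given an LDS usage of Bytes, how many waves per EU can
// still be resident. E.g. with the assumed values above (Limit = 32768 * 10
// / 4 = 81920 bytes), a kernel using 32768 bytes of LDS gets 81920 / 32768 =
// 2 waves, clamped to the [1, MaxWaves] range.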
unsigned AMDGPUSubtarget::getOccupancyWithLocalMemSize(uint32_t Bytes,
  const Function &F) const {
  unsigned WorkGroupSize = getFlatWorkGroupSizes(F).second;
  unsigned WorkGroupsPerCu = getMaxWorkGroupsPerCU(WorkGroupSize);
  unsigned MaxWaves = getMaxWavesPerEU();
  unsigned Limit = getLocalMemorySize() * MaxWaves / WorkGroupsPerCu;
  unsigned NumWaves = Limit / (Bytes ? Bytes : 1u);
  NumWaves = std::min(NumWaves, MaxWaves);
  NumWaves = std::max(NumWaves, 1u);
  return NumWaves;
}

unsigned
AMDGPUSubtarget::getOccupancyWithLocalMemSize(const MachineFunction &MF) const {
  const auto *MFI = MF.getInfo<SIMachineFunctionInfo>();
  return getOccupancyWithLocalMemSize(MFI->getLDSSize(), MF.getFunction());
}

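// Assuming the GCN wavefront size of 64, the defaults below work out to
// [128, 256] lanes for compute kernels, [1, 64] for the graphics shader
// stages, and [1, 1024] for everything else.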
std::pair<unsigned, unsigned>
AMDGPUSubtarget::getDefaultFlatWorkGroupSize(CallingConv::ID CC) const {
  switch (CC) {
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return std::make_pair(getWavefrontSize() * 2, getWavefrontSize() * 4);
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
    return std::make_pair(1, getWavefrontSize());
  default:
    return std::make_pair(1, 16 * getWavefrontSize());
  }
}

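// The function attributes consulted below are plain strings on the IR
// function; e.g. clang's amdgpu_flat_work_group_size(64, 256) attribute is
// expected to arrive here as "amdgpu-flat-work-group-size"="64,256".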
std::pair<unsigned, unsigned> AMDGPUSubtarget::getFlatWorkGroupSizes(
  const Function &F) const {
  // FIXME: 1024 if function.
  // Default minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Default =
    getDefaultFlatWorkGroupSize(F.getCallingConv());

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  Default.second = AMDGPU::getIntegerAttribute(
    F, "amdgpu-max-work-group-size", Default.second);
  Default.first = std::min(Default.first, Default.second);

  // Requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-flat-work-group-size", Default);

  // Make sure the requested minimum does not exceed the requested maximum.
  if (Requested.first > Requested.second)
    return Default;

  // Make sure the requested values do not violate the subtarget's
  // specifications.
  if (Requested.first < getMinFlatWorkGroupSize())
    return Default;
  if (Requested.second > getMaxFlatWorkGroupSize())
    return Default;

  return Requested;
}

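// "amdgpu-waves-per-eu" may hold either a single minimum ("2") or a
// "min,max" pair ("2,4"); the trailing 'true' passed below allows the
// maximum to be omitted, in which case Requested.second stays 0 and only
// the minimum is validated.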
std::pair<unsigned, unsigned> AMDGPUSubtarget::getWavesPerEU(
  const Function &F) const {
  // Default minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Default(1, getMaxWavesPerEU());

  // Default/requested minimum/maximum flat work group sizes.
  std::pair<unsigned, unsigned> FlatWorkGroupSizes = getFlatWorkGroupSizes(F);

  // If minimum/maximum flat work group sizes were explicitly requested using
  // "amdgpu-flat-work-group-size" attribute, then set default minimum/maximum
  // number of waves per execution unit to values implied by requested
  // minimum/maximum flat work group sizes.
  unsigned MinImpliedByFlatWorkGroupSize =
    getMaxWavesPerEU(FlatWorkGroupSizes.second);
  bool RequestedFlatWorkGroupSize = false;

  // TODO: Do not process "amdgpu-max-work-group-size" attribute once mesa
  // starts using "amdgpu-flat-work-group-size" attribute.
  if (F.hasFnAttribute("amdgpu-max-work-group-size") ||
      F.hasFnAttribute("amdgpu-flat-work-group-size")) {
    Default.first = MinImpliedByFlatWorkGroupSize;
    RequestedFlatWorkGroupSize = true;
  }

  // Requested minimum/maximum number of waves per execution unit.
  std::pair<unsigned, unsigned> Requested = AMDGPU::getIntegerPairAttribute(
    F, "amdgpu-waves-per-eu", Default, true);

  // Make sure the requested minimum does not exceed the requested maximum.
  if (Requested.second && Requested.first > Requested.second)
    return Default;

  // Make sure the requested values do not violate the subtarget's
  // specifications.
  if (Requested.first < getMinWavesPerEU() ||
      Requested.first > getMaxWavesPerEU())
    return Default;
  if (Requested.second > getMaxWavesPerEU())
    return Default;

  // Make sure the requested values are compatible with the values implied by
  // the requested minimum/maximum flat work group sizes.
  if (RequestedFlatWorkGroupSize &&
      Requested.first < MinImpliedByFlatWorkGroupSize)
    return Default;

  return Requested;
}

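// Attaches !range metadata to work item ID / local size intrinsic calls.
// The annotated IR ends up looking roughly like this (with an assumed
// 256-lane upper bound):
//   %id = call i32 @llvm.amdgcn.workitem.id.x(), !range !0
//   ...
//   !0 = !{i32 0, i32 256}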
bool AMDGPUSubtarget::makeLIDRangeMetadata(Instruction *I) const {
  Function *Kernel = I->getParent()->getParent();
  unsigned MinSize = 0;
  unsigned MaxSize = getFlatWorkGroupSizes(*Kernel).second;
  bool IdQuery = false;

  // If reqd_work_group_size is present it narrows the value down.
  if (auto *CI = dyn_cast<CallInst>(I)) {
    const Function *F = CI->getCalledFunction();
    if (F) {
      unsigned Dim = UINT_MAX;
      switch (F->getIntrinsicID()) {
      case Intrinsic::amdgcn_workitem_id_x:
      case Intrinsic::r600_read_tidig_x:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_x:
        Dim = 0;
        break;
      case Intrinsic::amdgcn_workitem_id_y:
      case Intrinsic::r600_read_tidig_y:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_y:
        Dim = 1;
        break;
      case Intrinsic::amdgcn_workitem_id_z:
      case Intrinsic::r600_read_tidig_z:
        IdQuery = true;
        LLVM_FALLTHROUGH;
      case Intrinsic::r600_read_local_size_z:
        Dim = 2;
        break;
      default:
        break;
      }
      if (Dim <= 3) {
        if (auto Node = Kernel->getMetadata("reqd_work_group_size"))
          if (Node->getNumOperands() == 3)
            MinSize = MaxSize = mdconst::extract<ConstantInt>(
                                  Node->getOperand(Dim))->getZExtValue();
      }
    }
  }

  if (!MaxSize)
    return false;

  // Range metadata is [Lo, Hi). For an ID query we need to pass the max size
  // as Hi; for a size query we need to pass MaxSize + 1.
  if (IdQuery)
    MinSize = 0;
  else
    ++MaxSize;

  MDBuilder MDB(I->getContext());
  MDNode *MaxWorkGroupSizeRange = MDB.createRange(APInt(32, MinSize),
                                                  APInt(32, MaxSize));
  I->setMetadata(LLVMContext::MD_range, MaxWorkGroupSizeRange);
  return true;
}

R600Subtarget::R600Subtarget(const Triple &TT, StringRef GPU, StringRef FS,
                             const TargetMachine &TM) :
  AMDGPUSubtarget(TT, GPU, FS, TM),
  InstrInfo(*this),
  FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
  TLInfo(TM, *this) {}

SISubtarget::SISubtarget(const Triple &TT, StringRef GPU, StringRef FS,
                         const GCNTargetMachine &TM)
    : AMDGPUSubtarget(TT, GPU, FS, TM), InstrInfo(*this),
      FrameLowering(TargetFrameLowering::StackGrowsUp, getStackAlignment(), 0),
      TLInfo(TM, *this) {
  CallLoweringInfo.reset(new AMDGPUCallLowering(*getTargetLowering()));
  Legalizer.reset(new AMDGPULegalizerInfo(*this, TM));

  RegBankInfo.reset(new AMDGPURegisterBankInfo(*getRegisterInfo()));
  InstSelector.reset(new AMDGPUInstructionSelector(
      *this, *static_cast<AMDGPURegisterBankInfo *>(RegBankInfo.get()), TM));
}

void SISubtarget::overrideSchedPolicy(MachineSchedPolicy &Policy,
                                      unsigned NumRegionInstrs) const {
  // Track register pressure so the scheduler can try to decrease
  // pressure once register usage is above the threshold defined by
  // SIRegisterInfo::getRegPressureSetLimit().
  Policy.ShouldTrackPressure = true;

  // Enabling both top down and bottom up scheduling seems to give us fewer
  // register spills than just using one of these approaches on its own.
  Policy.OnlyTopDown = false;
  Policy.OnlyBottomUp = false;

  // Enabling ShouldTrackLaneMasks crashes the SI Machine Scheduler.
  if (!enableSIScheduler())
    Policy.ShouldTrackLaneMasks = true;
}

bool SISubtarget::isVGPRSpillingEnabled(const Function& F) const {
  return EnableVGPRSpilling || !AMDGPU::isShader(F.getCallingConv());
}

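// Illustrative (assumed) numbers: 36 bytes of explicit kernel arguments with
// an 8-byte implicit-argument alignment round up to 40, and the implicit
// bytes are then added on top, giving 40 + ImplicitBytes in total.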
unsigned SISubtarget::getKernArgSegmentSize(const MachineFunction &MF,
                                            unsigned ExplicitArgBytes) const {
  unsigned ImplicitBytes = getImplicitArgNumBytes(MF);
  if (ImplicitBytes == 0)
    return ExplicitArgBytes;

  unsigned Alignment = getAlignmentForImplicitArgPtr();
  return alignTo(ExplicitArgBytes, Alignment) + ImplicitBytes;
}

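// E.g. on VI and newer, a kernel using 96 SGPRs lands in the "<= 100" bucket
// and is limited to 8 waves per EU, while on SI/CI the same count caps
// occupancy at 5 waves.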
unsigned SISubtarget::getOccupancyWithNumSGPRs(unsigned SGPRs) const {
  if (getGeneration() >= SISubtarget::VOLCANIC_ISLANDS) {
    if (SGPRs <= 80)
      return 10;
    if (SGPRs <= 88)
      return 9;
    if (SGPRs <= 100)
      return 8;
    return 7;
  }
  if (SGPRs <= 48)
    return 10;
  if (SGPRs <= 56)
    return 9;
  if (SGPRs <= 64)
    return 8;
  if (SGPRs <= 72)
    return 7;
  if (SGPRs <= 80)
    return 6;
  return 5;
}

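// E.g. a kernel using 40 VGPRs still allows 6 waves per EU, while one using
// 65 VGPRs falls into the "<= 84" bucket and drops to 3 waves.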
unsigned SISubtarget::getOccupancyWithNumVGPRs(unsigned VGPRs) const {
  if (VGPRs <= 24)
    return 10;
  if (VGPRs <= 28)
    return 9;
  if (VGPRs <= 32)
    return 8;
  if (VGPRs <= 36)
    return 7;
  if (VGPRs <= 40)
    return 6;
  if (VGPRs <= 48)
    return 5;
  if (VGPRs <= 64)
    return 4;
  if (VGPRs <= 84)
    return 3;
  if (VGPRs <= 128)
    return 2;
  return 1;
}

unsigned SISubtarget::getReservedNumSGPRs(const MachineFunction &MF) const {
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
  if (MFI.hasFlatScratchInit()) {
    if (getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS)
      return 6; // FLAT_SCRATCH, XNACK, VCC (in that order).
    if (getGeneration() == AMDGPUSubtarget::SEA_ISLANDS)
      return 4; // FLAT_SCRATCH, VCC (in that order).
  }

  if (isXNACKEnabled())
    return 4; // XNACK, VCC (in that order).
  return 2; // VCC.
}

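// The "amdgpu-num-sgpr" and "amdgpu-num-vgpr" attributes consulted below
// each hold a single integer, e.g. "amdgpu-num-sgpr"="48"; a request that
// fails any of the sanity checks is dropped by resetting Requested to 0.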
unsigned SISubtarget::getMaxNumSGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute the maximum number of SGPRs the function can use, using the
  // default/requested minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, false);
  unsigned MaxAddressableNumSGPRs = getMaxNumSGPRs(WavesPerEU.first, true);

  // Check if the maximum number of SGPRs was explicitly requested using the
  // "amdgpu-num-sgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-sgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-sgpr", MaxNumSGPRs);

    // Make sure the requested value does not violate the subtarget's
    // specifications.
    if (Requested && (Requested <= getReservedNumSGPRs(MF)))
      Requested = 0;

    // If more SGPRs are required to support the input user/system SGPRs,
    // increase to accommodate them.
    //
    // FIXME: This really ends up using the requested number of SGPRs + number
    // of reserved special registers in total. Theoretically you could re-use
    // the last input registers for these special registers, but this would
    // require a lot of complexity to deal with the weird aliasing.
    unsigned InputNumSGPRs = MFI.getNumPreloadedSGPRs();
    if (Requested && Requested < InputNumSGPRs)
      Requested = InputNumSGPRs;

    // Make sure the requested value is compatible with the values implied by
    // the default/requested minimum/maximum number of waves per execution
    // unit.
    if (Requested && Requested > getMaxNumSGPRs(WavesPerEU.first, false))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumSGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumSGPRs = Requested;
  }

  if (hasSGPRInitBug())
    MaxNumSGPRs = AMDGPU::IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;

  return std::min(MaxNumSGPRs - getReservedNumSGPRs(MF),
                  MaxAddressableNumSGPRs);
}

unsigned SISubtarget::getMaxNumVGPRs(const MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();

  // Compute the maximum number of VGPRs the function can use, using the
  // default/requested minimum number of waves per execution unit.
  std::pair<unsigned, unsigned> WavesPerEU = MFI.getWavesPerEU();
  unsigned MaxNumVGPRs = getMaxNumVGPRs(WavesPerEU.first);

  // Check if the maximum number of VGPRs was explicitly requested using the
  // "amdgpu-num-vgpr" attribute.
  if (F.hasFnAttribute("amdgpu-num-vgpr")) {
    unsigned Requested = AMDGPU::getIntegerAttribute(
      F, "amdgpu-num-vgpr", MaxNumVGPRs);

    // Make sure the requested value does not violate the subtarget's
    // specifications.
    if (Requested && Requested <= getReservedNumVGPRs(MF))
      Requested = 0;

    // Make sure the requested value is compatible with the values implied by
    // the default/requested minimum/maximum number of waves per execution
    // unit.
    if (Requested && Requested > getMaxNumVGPRs(WavesPerEU.first))
      Requested = 0;
    if (WavesPerEU.second &&
        Requested && Requested < getMinNumVGPRs(WavesPerEU.second))
      Requested = 0;

    if (Requested)
      MaxNumVGPRs = Requested;
  }

  return MaxNumVGPRs - getReservedNumVGPRs(MF);
}

namespace {
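// DAG mutation that keeps consecutive memory operations of the same kind
// (VMEM, FLAT, SMRD or DS) glued together with artificial scheduling edges,
// so the scheduler cannot pull such pairs apart.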
struct MemOpClusterMutation : ScheduleDAGMutation {
  const SIInstrInfo *TII;

  MemOpClusterMutation(const SIInstrInfo *tii) : TII(tii) {}

  void apply(ScheduleDAGInstrs *DAGInstrs) override {
    ScheduleDAGMI *DAG = static_cast<ScheduleDAGMI*>(DAGInstrs);

    SUnit *SUa = nullptr;
    // Search for two consecutive memory operations and link them
    // to prevent the scheduler from moving them apart.
    // During DAG pre-processing, SUnits are in the original order of
    // the instructions before scheduling.
    for (SUnit &SU : DAG->SUnits) {
      MachineInstr &MI2 = *SU.getInstr();
      if (!MI2.mayLoad() && !MI2.mayStore()) {
        SUa = nullptr;
        continue;
      }
      if (!SUa) {
        SUa = &SU;
        continue;
      }

      MachineInstr &MI1 = *SUa->getInstr();
      if ((TII->isVMEM(MI1) && TII->isVMEM(MI2)) ||
          (TII->isFLAT(MI1) && TII->isFLAT(MI2)) ||
          (TII->isSMRD(MI1) && TII->isSMRD(MI2)) ||
          (TII->isDS(MI1) && TII->isDS(MI2))) {
        SU.addPredBarrier(SUa);

        for (const SDep &SI : SU.Preds) {
          if (SI.getSUnit() != SUa)
            SUa->addPred(SDep(SI.getSUnit(), SDep::Artificial));
        }

        if (&SU != &DAG->ExitSU) {
          for (const SDep &SI : SUa->Succs) {
            if (SI.getSUnit() != &SU)
              SI.getSUnit()->addPred(SDep(&SU, SDep::Artificial));
          }
        }
      }

      SUa = &SU;
    }
  }
};
} // namespace

void SISubtarget::getPostRAMutations(
    std::vector<std::unique_ptr<ScheduleDAGMutation>> &Mutations) const {
  Mutations.push_back(llvm::make_unique<MemOpClusterMutation>(&InstrInfo));
}