//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPUTargetTransformInfo.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "AMDGPUAsmUtils.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
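
// Worked example (illustrative, not in the original file): packing value 5
// into a 3-bit field at bit 4 of an initially-zero word gives 0x50, and
// unpacking recovers it:
//
//   unsigned Enc = packBits(/*Src=*/5, /*Dst=*/0, /*Shift=*/4, /*Width=*/3);
//   unsigned Dec = unpackBits(Enc, /*Shift=*/4, /*Width=*/3); // Dec == 5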

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
  return (VersionMajor >= 10) ? 6 : 4;
}

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }
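
// Taken together, the getters above describe the s_waitcnt layout: vmcnt in
// bits [3:0] (with bits [5:4] stored at [15:14] from gfx9 onwards), expcnt in
// bits [6:4], and lgkmcnt in bits [11:8] (widened to [13:8] on gfx10).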

} // end anonymous namespace

namespace llvm {

namespace AMDGPU {

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t dwords;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Dwords) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndDwords(BaseOpc, Dwords);
  return Info ? Info->Opcode : -1;
}

int getMUBUFDwords(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->dwords : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

// Wrapper for a Tablegen'd function. enum Subtarget is not defined in any
// header file, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto Version = getIsaVersion(STI->getCPU());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << Version.Major
         << Version.Minor
         << Version.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";
  if (hasSRAMECC(*STI))
    Stream << "+sram-ecc";

  Stream.flush();
}
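
// For illustration: with an amdgcn-amd-amdhsa triple and CPU gfx906 with
// XNACK enabled, the stream receives a name of the form
// "amdgcn-amd-amdhsa--gfx906+xnack" (the environment component here is
// empty, hence the doubled dash).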

bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getTargetTriple().getOS() == Triple::AMDHSA &&
         STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}
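
// Worked example (illustrative): with a 64-wide wavefront and
// FlatWorkGroupSize = 256, getWavesPerWorkGroup returns 4, so this yields
// min(40 / 4, 16u) = 10 work groups per CU.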

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI) {
  return getMaxWavesPerEU() * getEUsPerCU(STI);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(STI, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU() {
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(STI, FlatWorkGroupSize),
                 getEUsPerCU(STI)) / getEUsPerCU(STI);
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(STI)) /
         getWavefrontSize(STI);
}

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
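
// Worked example (illustrative): with the 8-SGPR encoding granule, a kernel
// using 30 SGPRs rounds up to 32 and encodes as 32 / 8 - 1 = 3, since the
// field stores the block count minus one.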

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI,
                             Optional<bool> EnableWavefrontSize32) {
  bool IsWave32 = EnableWavefrontSize32 ?
      *EnableWavefrontSize32 :
      STI->getFeatureBits().test(FeatureWavefrontSize32);
  return IsWave32 ? 8 : 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI,
                                Optional<bool> EnableWavefrontSize32) {
  return getVGPRAllocGranule(STI, EnableWavefrontSize32);
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  return 256;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  return getTotalNumVGPRs(STI);
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs,
                          Optional<bool> EnableWavefrontSize32) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs),
                     getVGPREncodingGranule(STI, EnableWavefrontSize32));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI, EnableWavefrontSize32) - 1;
}
431
Eugene Zelenkod96089b2017-02-14 00:33:36 +0000432} // end namespace IsaInfo
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000433
Tom Stellardff7416b2015-06-26 21:58:31 +0000434void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +0000435 const MCSubtargetInfo *STI) {
436 IsaVersion Version = getIsaVersion(STI->getCPU());
Tom Stellardff7416b2015-06-26 21:58:31 +0000437
438 memset(&Header, 0, sizeof(Header));
439
440 Header.amd_kernel_code_version_major = 1;
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +0000441 Header.amd_kernel_code_version_minor = 2;
Tom Stellardff7416b2015-06-26 21:58:31 +0000442 Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
Konstantin Zhuravlyov71e43ee2018-09-12 18:50:47 +0000443 Header.amd_machine_version_major = Version.Major;
444 Header.amd_machine_version_minor = Version.Minor;
445 Header.amd_machine_version_stepping = Version.Stepping;
Tom Stellardff7416b2015-06-26 21:58:31 +0000446 Header.kernel_code_entry_byte_offset = sizeof(Header);
Tom Stellardff7416b2015-06-26 21:58:31 +0000447 Header.wavefront_size = 6;
Matt Arsenault5d910192017-01-25 20:21:57 +0000448
449 // If the code object does not support indirect functions, then the value must
450 // be 0xffffffff.
451 Header.call_convention = -1;
452
Tom Stellardff7416b2015-06-26 21:58:31 +0000453 // These alignment values are specified in powers of two, so alignment =
454 // 2^n. The minimum alignment is 2^4 = 16.
455 Header.kernarg_segment_alignment = 4;
456 Header.group_segment_alignment = 4;
457 Header.private_segment_alignment = 4;
Stanislav Mekhanoshincee607e2019-04-24 17:03:15 +0000458
459 if (Version.Major >= 10) {
460 Header.compute_pgm_resource_registers |=
461 S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
462 S_00B848_MEM_ORDERED(1);
463 }
Tom Stellardff7416b2015-06-26 21:58:31 +0000464}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}

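// For illustration: this parses attributes such as
// "amdgpu-flat-work-group-size"="1,256" into the pair {1, 256}; when
// OnlyFirstRequired is set, a value like "1" with no second component is
// also accepted.
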
unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
                                getLgkmcntBitWidth(Version.Major));
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}
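
// A minimal usage sketch (illustrative): encoding starts from
// getWaitcntBitMask, i.e. every counter at its "no wait" maximum, and then
// packs each field in turn. Assuming a pre-gfx9 target:
//
//   IsaVersion IV = getIsaVersion("gfx803");
//   unsigned Enc = encodeWaitcnt(IV, /*Vmcnt=*/0, /*Expcnt=*/7, /*Lgkmcnt=*/15);
//   Waitcnt W = decodeWaitcnt(IV, Enc); // W.VmCnt == 0, etc.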

//===----------------------------------------------------------------------===//
// hwreg
//===----------------------------------------------------------------------===//

namespace Hwreg {

int64_t getHwregId(const StringRef Name) {
  for (int Id = ID_SYMBOLIC_FIRST_; Id < ID_SYMBOLIC_LAST_; ++Id) {
    if (IdSymbolic[Id] && Name == IdSymbolic[Id])
      return Id;
  }
  return ID_UNKNOWN_;
}

static unsigned getLastSymbolicHwreg(const MCSubtargetInfo &STI) {
  if (isSI(STI) || isCI(STI) || isVI(STI))
    return ID_SYMBOLIC_FIRST_GFX9_;
  else if (isGFX9(STI))
    return ID_SYMBOLIC_FIRST_GFX10_;
  else
    return ID_SYMBOLIC_LAST_;
}

bool isValidHwreg(int64_t Id, const MCSubtargetInfo &STI) {
  return ID_SYMBOLIC_FIRST_ <= Id && Id < getLastSymbolicHwreg(STI) &&
         IdSymbolic[Id];
}

bool isValidHwreg(int64_t Id) {
  return 0 <= Id && isUInt<ID_WIDTH_>(Id);
}

bool isValidHwregOffset(int64_t Offset) {
  return 0 <= Offset && isUInt<OFFSET_WIDTH_>(Offset);
}

bool isValidHwregWidth(int64_t Width) {
  return 0 <= (Width - 1) && isUInt<WIDTH_M1_WIDTH_>(Width - 1);
}

int64_t encodeHwreg(int64_t Id, int64_t Offset, int64_t Width) {
  return (Id << ID_SHIFT_) |
         (Offset << OFFSET_SHIFT_) |
         ((Width - 1) << WIDTH_M1_SHIFT_);
}

StringRef getHwreg(unsigned Id, const MCSubtargetInfo &STI) {
  return isValidHwreg(Id, STI) ? IdSymbolic[Id] : "";
}

void decodeHwreg(unsigned Val, unsigned &Id, unsigned &Offset, unsigned &Width) {
  Id = (Val & ID_MASK_) >> ID_SHIFT_;
  Offset = (Val & OFFSET_MASK_) >> OFFSET_SHIFT_;
  Width = ((Val & WIDTH_M1_MASK_) >> WIDTH_M1_SHIFT_) + 1;
}
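
// Illustrative round trip: encodeHwreg(Id, /*Offset=*/8, /*Width=*/8)
// produces a value that decodeHwreg splits back into {Id, 8, 8}; note that
// the width is stored biased by one in the WIDTH_M1 field.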

} // namespace Hwreg

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
         Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9_GFX10(TTMP0) \
  CASE_VI_GFX9_GFX10(TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2) \
  CASE_VI_GFX9_GFX10(TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4) \
  CASE_VI_GFX9_GFX10(TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6) \
  CASE_VI_GFX9_GFX10(TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8) \
  CASE_VI_GFX9_GFX10(TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10) \
  CASE_VI_GFX9_GFX10(TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12) \
  CASE_VI_GFX9_GFX10(TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14) \
  CASE_VI_GFX9_GFX10(TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9_GFX10(node) \
  case node: return (isGFX9(STI) || isGFX10(STI)) ? node##_gfx9_gfx10 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9_GFX10(node) case node##_vi: case node##_gfx9_gfx10: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
    return 160;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}
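
// For illustration: isInlinableLiteral32(FloatToBits(0.5f), false) is true,
// while 0.3f (0x3E99999A) matches no entry above and would instead occupy a
// 32-bit literal slot in the instruction stream.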

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isArgPassedInSGPR(const Argument *A) {
  const Function *F = A->getParent();

  // Arguments to compute shaders are never a source of divergence.
  CallingConv::ID CC = F->getCallingConv();
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    return true;
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_LS:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
    // For non-compute shaders, SGPR inputs are marked with either inreg or
    // byval. Everything else is in VGPRs.
    return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
           F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
  default:
    // TODO: Should calls support inreg for SGPR inputs?
    return false;
  }
}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10(ST);
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return (hasSMEMByteOffset(ST)) ?
    isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}

1080// fields in an MUBUF instruction. Return false if it is not possible (due to a
1081// hardware bug needing a workaround).
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00001082//
1083// The required alignment ensures that individual address components remain
1084// aligned if they are aligned to begin with. It also ensures that additional
1085// offsets within the given alignment can be added to the resulting ImmOffset.
Tim Renouf4f703f52018-08-21 11:07:10 +00001086bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
Nicolai Haehnlea7b00052018-11-30 22:55:38 +00001087 const GCNSubtarget *Subtarget, uint32_t Align) {
Tim Renouf4f703f52018-08-21 11:07:10 +00001088 const uint32_t MaxImm = alignDown(4095, Align);
1089 uint32_t Overflow = 0;
1090
1091 if (Imm > MaxImm) {
1092 if (Imm <= MaxImm + 64) {
1093 // Use an SOffset inline constant for 4..64
1094 Overflow = Imm - MaxImm;
1095 Imm = MaxImm;
1096 } else {
1097 // Try to keep the same value in SOffset for adjacent loads, so that
1098 // the corresponding register contents can be re-used.
1099 //
1100 // Load values with all low-bits (except for alignment bits) set into
1101 // SOffset, so that a larger range of values can be covered using
1102 // s_movk_i32.
1103 //
1104 // Atomic operations fail to work correctly when individual address
1105 // components are unaligned, even if their sum is aligned.
1106 uint32_t High = (Imm + Align) & ~4095;
1107 uint32_t Low = (Imm + Align) & 4095;
1108 Imm = Low;
1109 Overflow = High - Align;
1110 }
1111 }
1112
1113 // There is a hardware bug in SI and CI which prevents address clamping in
1114 // MUBUF instructions from working correctly with SOffsets. The immediate
1115 // offset is unaffected.
1116 if (Overflow > 0 &&
1117 Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
1118 return false;
1119
1120 ImmOffset = Imm;
1121 SOffset = Overflow;
1122 return true;
1123}
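
// Worked example (illustrative, Align = 4): Imm = 4100 exceeds
// MaxImm = alignDown(4095, 4) = 4092, but 4100 <= MaxImm + 64, so the result
// is ImmOffset = 4092 and SOffset = 8 (an SOffset inline constant).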

SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";
}
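
// Illustrative IR: a function annotated with
//   attributes #0 = { "amdgpu-ieee"="true" "amdgpu-dx10-clamp"="false" }
// gets IEEE enabled and DX10Clamp disabled, overriding the
// calling-convention defaults.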

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU
} // namespace llvm