blob: 0a1ab73d8dcfda545f321fc05d041e346dbb3cb0 [file] [log] [blame]
Eugene Zelenkod96089b2017-02-14 00:33:36 +00001//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
Tom Stellard347ac792015-06-26 21:15:07 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
Eugene Zelenkod96089b2017-02-14 00:33:36 +00009
Tom Stellarde3b5aea2015-12-02 17:00:42 +000010#include "AMDGPU.h"
Eugene Zelenkod96089b2017-02-14 00:33:36 +000011#include "AMDGPUBaseInfo.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000012#include "SIDefines.h"
Eugene Zelenkod96089b2017-02-14 00:33:36 +000013#include "llvm/ADT/StringRef.h"
14#include "llvm/ADT/Triple.h"
Tom Stellard08efb7e2017-01-27 18:41:14 +000015#include "llvm/CodeGen/MachineMemOperand.h"
Eugene Zelenkod96089b2017-02-14 00:33:36 +000016#include "llvm/IR/Attributes.h"
Tom Stellard08efb7e2017-01-27 18:41:14 +000017#include "llvm/IR/Constants.h"
Tom Stellardac00eb52015-12-15 16:26:16 +000018#include "llvm/IR/Function.h"
Tom Stellarde3b5aea2015-12-02 17:00:42 +000019#include "llvm/IR/GlobalValue.h"
Eugene Zelenkod96089b2017-02-14 00:33:36 +000020#include "llvm/IR/Instruction.h"
Tom Stellardca166212017-01-30 21:56:46 +000021#include "llvm/IR/LLVMContext.h"
Tom Stellarde135ffd2015-09-25 21:41:28 +000022#include "llvm/MC/MCContext.h"
Eugene Zelenkod96089b2017-02-14 00:33:36 +000023#include "llvm/MC/MCInstrDesc.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000024#include "llvm/MC/MCRegisterInfo.h"
Tom Stellarde135ffd2015-09-25 21:41:28 +000025#include "llvm/MC/MCSectionELF.h"
Tom Stellard2b65ed32015-12-21 18:44:27 +000026#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard347ac792015-06-26 21:15:07 +000027#include "llvm/MC/SubtargetFeature.h"
Eugene Zelenkod96089b2017-02-14 00:33:36 +000028#include "llvm/Support/Casting.h"
29#include "llvm/Support/ELF.h"
30#include "llvm/Support/ErrorHandling.h"
31#include "llvm/Support/MathExtras.h"
32#include <algorithm>
33#include <cassert>
34#include <cstdint>
35#include <cstring>
36#include <utility>
Tom Stellard347ac792015-06-26 21:15:07 +000037
38#define GET_SUBTARGETINFO_ENUM
39#include "AMDGPUGenSubtargetInfo.inc"
40#undef GET_SUBTARGETINFO_ENUM
41
Tom Stellard2b65ed32015-12-21 18:44:27 +000042#define GET_REGINFO_ENUM
43#include "AMDGPUGenRegisterInfo.inc"
44#undef GET_REGINFO_ENUM
45
Sam Koltona3ec5c12016-10-07 14:46:06 +000046#define GET_INSTRINFO_NAMED_OPS
47#define GET_INSTRINFO_ENUM
48#include "AMDGPUGenInstrInfo.inc"
49#undef GET_INSTRINFO_NAMED_OPS
50#undef GET_INSTRINFO_ENUM
51
namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  // Clear the destination field, then merge in the (truncated) source bits.
  // Note: the previous form also and-ed with ~(1 << Shift), which is redundant
  // since that bit is already covered by the mask for any Width >= 1.
  unsigned Mask = getBitMask(Shift, Width);
  return ((Src << Shift) & Mask) | (Dst & ~Mask);
}

/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}

/// \returns Vmcnt bit shift.
unsigned getVmcntBitShift() { return 0; }

/// \returns Vmcnt bit width.
unsigned getVmcntBitWidth() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

} // end anonymous namespace
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +000094
Tom Stellard347ac792015-06-26 21:15:07 +000095namespace llvm {
96namespace AMDGPU {
97
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +000098namespace IsaInfo {
Tom Stellard347ac792015-06-26 21:15:07 +000099
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000100IsaVersion getIsaVersion(const FeatureBitset &Features) {
101 // CI.
Tom Stellard347ac792015-06-26 21:15:07 +0000102 if (Features.test(FeatureISAVersion7_0_0))
103 return {7, 0, 0};
Tom Stellard347ac792015-06-26 21:15:07 +0000104 if (Features.test(FeatureISAVersion7_0_1))
105 return {7, 0, 1};
Yaxun Liu94add852016-10-26 16:37:56 +0000106 if (Features.test(FeatureISAVersion7_0_2))
107 return {7, 0, 2};
108
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000109 // VI.
Tom Stellard347ac792015-06-26 21:15:07 +0000110 if (Features.test(FeatureISAVersion8_0_0))
111 return {8, 0, 0};
Tom Stellard347ac792015-06-26 21:15:07 +0000112 if (Features.test(FeatureISAVersion8_0_1))
113 return {8, 0, 1};
Changpeng Fang98317d22016-10-11 16:00:47 +0000114 if (Features.test(FeatureISAVersion8_0_2))
115 return {8, 0, 2};
Changpeng Fangc16be002016-01-13 20:39:25 +0000116 if (Features.test(FeatureISAVersion8_0_3))
117 return {8, 0, 3};
Yaxun Liu94add852016-10-26 16:37:56 +0000118 if (Features.test(FeatureISAVersion8_0_4))
119 return {8, 0, 4};
Yaxun Liu94add852016-10-26 16:37:56 +0000120 if (Features.test(FeatureISAVersion8_1_0))
121 return {8, 1, 0};
122
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000123 if (!Features.test(FeatureGCN) || Features.test(FeatureSouthernIslands))
124 return {0, 0, 0};
125 return {7, 0, 0};
Tom Stellard347ac792015-06-26 21:15:07 +0000126}
127
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000128unsigned getWavefrontSize(const FeatureBitset &Features) {
129 if (Features.test(FeatureWavefrontSize16))
130 return 16;
131 if (Features.test(FeatureWavefrontSize32))
132 return 32;
133
134 return 64;
135}
136
137unsigned getLocalMemorySize(const FeatureBitset &Features) {
138 if (Features.test(FeatureLocalMemorySize32768))
139 return 32768;
140 if (Features.test(FeatureLocalMemorySize65536))
141 return 65536;
142
143 return 0;
144}
145
146unsigned getEUsPerCU(const FeatureBitset &Features) {
147 return 4;
148}
149
150unsigned getMaxWorkGroupsPerCU(const FeatureBitset &Features,
151 unsigned FlatWorkGroupSize) {
152 if (!Features.test(FeatureGCN))
153 return 8;
Stanislav Mekhanoshin19f98c62017-02-15 01:03:59 +0000154 unsigned N = getWavesPerWorkGroup(Features, FlatWorkGroupSize);
155 if (N == 1)
156 return 40;
157 N = 40 / N;
158 return std::min(N, 16u);
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000159}
160
161unsigned getMaxWavesPerCU(const FeatureBitset &Features) {
162 return getMaxWavesPerEU(Features) * getEUsPerCU(Features);
163}
164
165unsigned getMaxWavesPerCU(const FeatureBitset &Features,
166 unsigned FlatWorkGroupSize) {
167 return getWavesPerWorkGroup(Features, FlatWorkGroupSize);
168}
169
170unsigned getMinWavesPerEU(const FeatureBitset &Features) {
171 return 1;
172}
173
174unsigned getMaxWavesPerEU(const FeatureBitset &Features) {
175 if (!Features.test(FeatureGCN))
176 return 8;
177 // FIXME: Need to take scratch memory into account.
178 return 10;
179}
180
181unsigned getMaxWavesPerEU(const FeatureBitset &Features,
182 unsigned FlatWorkGroupSize) {
183 return alignTo(getMaxWavesPerCU(Features, FlatWorkGroupSize),
184 getEUsPerCU(Features)) / getEUsPerCU(Features);
185}
186
187unsigned getMinFlatWorkGroupSize(const FeatureBitset &Features) {
188 return 1;
189}
190
191unsigned getMaxFlatWorkGroupSize(const FeatureBitset &Features) {
192 return 2048;
193}
194
195unsigned getWavesPerWorkGroup(const FeatureBitset &Features,
196 unsigned FlatWorkGroupSize) {
197 return alignTo(FlatWorkGroupSize, getWavefrontSize(Features)) /
198 getWavefrontSize(Features);
199}
200
201unsigned getSGPRAllocGranule(const FeatureBitset &Features) {
202 IsaVersion Version = getIsaVersion(Features);
203 if (Version.Major >= 8)
204 return 16;
205 return 8;
206}
207
208unsigned getSGPREncodingGranule(const FeatureBitset &Features) {
209 return 8;
210}
211
212unsigned getTotalNumSGPRs(const FeatureBitset &Features) {
213 IsaVersion Version = getIsaVersion(Features);
214 if (Version.Major >= 8)
215 return 800;
216 return 512;
217}
218
219unsigned getAddressableNumSGPRs(const FeatureBitset &Features) {
220 if (Features.test(FeatureSGPRInitBug))
221 return FIXED_NUM_SGPRS_FOR_INIT_BUG;
222
223 IsaVersion Version = getIsaVersion(Features);
224 if (Version.Major >= 8)
225 return 102;
226 return 104;
227}
228
229unsigned getMinNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
Konstantin Zhuravlyovfd871372017-02-09 21:33:23 +0000230 assert(WavesPerEU != 0);
231
232 if (WavesPerEU >= getMaxWavesPerEU(Features))
233 return 0;
234 unsigned MinNumSGPRs =
235 alignDown(getTotalNumSGPRs(Features) / (WavesPerEU + 1),
236 getSGPRAllocGranule(Features)) + 1;
237 return std::min(MinNumSGPRs, getAddressableNumSGPRs(Features));
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000238}
239
240unsigned getMaxNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU,
241 bool Addressable) {
Konstantin Zhuravlyovfd871372017-02-09 21:33:23 +0000242 assert(WavesPerEU != 0);
243
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000244 IsaVersion Version = getIsaVersion(Features);
Konstantin Zhuravlyovfd871372017-02-09 21:33:23 +0000245 unsigned MaxNumSGPRs = alignDown(getTotalNumSGPRs(Features) / WavesPerEU,
246 getSGPRAllocGranule(Features));
247 unsigned AddressableNumSGPRs = getAddressableNumSGPRs(Features);
248 if (Version.Major >= 8 && !Addressable)
249 AddressableNumSGPRs = 112;
250 return std::min(MaxNumSGPRs, AddressableNumSGPRs);
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000251}
252
253unsigned getVGPRAllocGranule(const FeatureBitset &Features) {
254 return 4;
255}
256
257unsigned getVGPREncodingGranule(const FeatureBitset &Features) {
258 return getVGPRAllocGranule(Features);
259}
260
261unsigned getTotalNumVGPRs(const FeatureBitset &Features) {
262 return 256;
263}
264
265unsigned getAddressableNumVGPRs(const FeatureBitset &Features) {
266 return getTotalNumVGPRs(Features);
267}
268
269unsigned getMinNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
Konstantin Zhuravlyovfd871372017-02-09 21:33:23 +0000270 assert(WavesPerEU != 0);
271
272 if (WavesPerEU >= getMaxWavesPerEU(Features))
273 return 0;
274 unsigned MinNumVGPRs =
275 alignDown(getTotalNumVGPRs(Features) / (WavesPerEU + 1),
276 getVGPRAllocGranule(Features)) + 1;
277 return std::min(MinNumVGPRs, getAddressableNumVGPRs(Features));
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000278}
279
280unsigned getMaxNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
Konstantin Zhuravlyovfd871372017-02-09 21:33:23 +0000281 assert(WavesPerEU != 0);
282
283 unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(Features) / WavesPerEU,
284 getVGPRAllocGranule(Features));
285 unsigned AddressableNumVGPRs = getAddressableNumVGPRs(Features);
286 return std::min(MaxNumVGPRs, AddressableNumVGPRs);
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000287}
288
Eugene Zelenkod96089b2017-02-14 00:33:36 +0000289} // end namespace IsaInfo
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000290
Tom Stellardff7416b2015-06-26 21:58:31 +0000291void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
292 const FeatureBitset &Features) {
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000293 IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(Features);
Tom Stellardff7416b2015-06-26 21:58:31 +0000294
295 memset(&Header, 0, sizeof(Header));
296
297 Header.amd_kernel_code_version_major = 1;
298 Header.amd_kernel_code_version_minor = 0;
299 Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
300 Header.amd_machine_version_major = ISA.Major;
301 Header.amd_machine_version_minor = ISA.Minor;
302 Header.amd_machine_version_stepping = ISA.Stepping;
303 Header.kernel_code_entry_byte_offset = sizeof(Header);
304 // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
305 Header.wavefront_size = 6;
Matt Arsenault5d910192017-01-25 20:21:57 +0000306
307 // If the code object does not support indirect functions, then the value must
308 // be 0xffffffff.
309 Header.call_convention = -1;
310
Tom Stellardff7416b2015-06-26 21:58:31 +0000311 // These alignment values are specified in powers of two, so alignment =
312 // 2^n. The minimum alignment is 2^4 = 16.
313 Header.kernarg_segment_alignment = 4;
314 Header.group_segment_alignment = 4;
315 Header.private_segment_alignment = 4;
316}
317
Tom Stellarde135ffd2015-09-25 21:41:28 +0000318MCSection *getHSATextSection(MCContext &Ctx) {
319 return Ctx.getELFSection(".hsatext", ELF::SHT_PROGBITS,
320 ELF::SHF_ALLOC | ELF::SHF_WRITE |
321 ELF::SHF_EXECINSTR |
322 ELF::SHF_AMDGPU_HSA_AGENT |
323 ELF::SHF_AMDGPU_HSA_CODE);
324}
325
Tom Stellard00f2f912015-12-02 19:47:57 +0000326MCSection *getHSADataGlobalAgentSection(MCContext &Ctx) {
327 return Ctx.getELFSection(".hsadata_global_agent", ELF::SHT_PROGBITS,
328 ELF::SHF_ALLOC | ELF::SHF_WRITE |
329 ELF::SHF_AMDGPU_HSA_GLOBAL |
330 ELF::SHF_AMDGPU_HSA_AGENT);
331}
332
333MCSection *getHSADataGlobalProgramSection(MCContext &Ctx) {
334 return Ctx.getELFSection(".hsadata_global_program", ELF::SHT_PROGBITS,
335 ELF::SHF_ALLOC | ELF::SHF_WRITE |
336 ELF::SHF_AMDGPU_HSA_GLOBAL);
337}
338
Tom Stellard9760f032015-12-03 03:34:32 +0000339MCSection *getHSARodataReadonlyAgentSection(MCContext &Ctx) {
340 return Ctx.getELFSection(".hsarodata_readonly_agent", ELF::SHT_PROGBITS,
341 ELF::SHF_ALLOC | ELF::SHF_AMDGPU_HSA_READONLY |
342 ELF::SHF_AMDGPU_HSA_AGENT);
343}
344
Tom Stellarde3b5aea2015-12-02 17:00:42 +0000345bool isGroupSegment(const GlobalValue *GV) {
346 return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
347}
348
Tom Stellard00f2f912015-12-02 19:47:57 +0000349bool isGlobalSegment(const GlobalValue *GV) {
350 return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
351}
352
353bool isReadOnlySegment(const GlobalValue *GV) {
354 return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
355}
356
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +0000357bool shouldEmitConstantsToTextSection(const Triple &TT) {
358 return TT.getOS() != Triple::AMDHSA;
359}
360
Matt Arsenault83002722016-05-12 02:45:18 +0000361int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
Marek Olsakfccabaf2016-01-13 11:45:36 +0000362 Attribute A = F.getFnAttribute(Name);
Matt Arsenault83002722016-05-12 02:45:18 +0000363 int Result = Default;
Tom Stellardac00eb52015-12-15 16:26:16 +0000364
365 if (A.isStringAttribute()) {
366 StringRef Str = A.getValueAsString();
Marek Olsakfccabaf2016-01-13 11:45:36 +0000367 if (Str.getAsInteger(0, Result)) {
Tom Stellardac00eb52015-12-15 16:26:16 +0000368 LLVMContext &Ctx = F.getContext();
Matt Arsenault83002722016-05-12 02:45:18 +0000369 Ctx.emitError("can't parse integer attribute " + Name);
Tom Stellardac00eb52015-12-15 16:26:16 +0000370 }
371 }
Matt Arsenault83002722016-05-12 02:45:18 +0000372
Marek Olsakfccabaf2016-01-13 11:45:36 +0000373 return Result;
374}
375
Konstantin Zhuravlyov1d650262016-09-06 20:22:28 +0000376std::pair<int, int> getIntegerPairAttribute(const Function &F,
377 StringRef Name,
378 std::pair<int, int> Default,
379 bool OnlyFirstRequired) {
380 Attribute A = F.getFnAttribute(Name);
381 if (!A.isStringAttribute())
382 return Default;
383
384 LLVMContext &Ctx = F.getContext();
385 std::pair<int, int> Ints = Default;
386 std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
387 if (Strs.first.trim().getAsInteger(0, Ints.first)) {
388 Ctx.emitError("can't parse first integer attribute " + Name);
389 return Default;
390 }
391 if (Strs.second.trim().getAsInteger(0, Ints.second)) {
Eugene Zelenkod96089b2017-02-14 00:33:36 +0000392 if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
Konstantin Zhuravlyov1d650262016-09-06 20:22:28 +0000393 Ctx.emitError("can't parse second integer attribute " + Name);
394 return Default;
395 }
396 }
397
398 return Ints;
Tom Stellard79a1fd72016-04-14 16:27:07 +0000399}
400
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000401unsigned getVmcntBitMask(const IsaInfo::IsaVersion &Version) {
402 return (1 << getVmcntBitWidth()) - 1;
403}
404
405unsigned getExpcntBitMask(const IsaInfo::IsaVersion &Version) {
406 return (1 << getExpcntBitWidth()) - 1;
407}
408
409unsigned getLgkmcntBitMask(const IsaInfo::IsaVersion &Version) {
410 return (1 << getLgkmcntBitWidth()) - 1;
411}
412
413unsigned getWaitcntBitMask(const IsaInfo::IsaVersion &Version) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000414 unsigned Vmcnt = getBitMask(getVmcntBitShift(), getVmcntBitWidth());
415 unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
416 unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
417 return Vmcnt | Expcnt | Lgkmcnt;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +0000418}
419
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000420unsigned decodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000421 return unpackBits(Waitcnt, getVmcntBitShift(), getVmcntBitWidth());
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +0000422}
423
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000424unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000425 return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
426}
427
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000428unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000429 return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
430}
431
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000432void decodeWaitcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000433 unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
434 Vmcnt = decodeVmcnt(Version, Waitcnt);
435 Expcnt = decodeExpcnt(Version, Waitcnt);
436 Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
437}
438
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000439unsigned encodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
440 unsigned Vmcnt) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000441 return packBits(Vmcnt, Waitcnt, getVmcntBitShift(), getVmcntBitWidth());
442}
443
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000444unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
445 unsigned Expcnt) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000446 return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
447}
448
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000449unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
450 unsigned Lgkmcnt) {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000451 return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
452}
453
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +0000454unsigned encodeWaitcnt(const IsaInfo::IsaVersion &Version,
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000455 unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
Konstantin Zhuravlyov31dbb032017-01-06 17:23:21 +0000456 unsigned Waitcnt = getWaitcntBitMask(Version);
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +0000457 Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
458 Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
459 Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
460 return Waitcnt;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +0000461}
462
Marek Olsakfccabaf2016-01-13 11:45:36 +0000463unsigned getInitialPSInputAddr(const Function &F) {
464 return getIntegerAttribute(F, "InitialPSInputAddr", 0);
Tom Stellardac00eb52015-12-15 16:26:16 +0000465}
466
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +0000467bool isShader(CallingConv::ID cc) {
468 switch(cc) {
469 case CallingConv::AMDGPU_VS:
470 case CallingConv::AMDGPU_GS:
471 case CallingConv::AMDGPU_PS:
472 case CallingConv::AMDGPU_CS:
473 return true;
474 default:
475 return false;
476 }
477}
478
479bool isCompute(CallingConv::ID cc) {
480 return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
481}
482
Tom Stellard2b65ed32015-12-21 18:44:27 +0000483bool isSI(const MCSubtargetInfo &STI) {
484 return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
485}
486
487bool isCI(const MCSubtargetInfo &STI) {
488 return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
489}
490
491bool isVI(const MCSubtargetInfo &STI) {
492 return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
493}
494
495unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
496
497 switch(Reg) {
498 default: break;
499 case AMDGPU::FLAT_SCR:
500 assert(!isSI(STI));
501 return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;
502
503 case AMDGPU::FLAT_SCR_LO:
504 assert(!isSI(STI));
505 return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;
506
507 case AMDGPU::FLAT_SCR_HI:
508 assert(!isSI(STI));
509 return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
510 }
511 return Reg;
512}
513
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000514bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
Artem Tamazov43b61562017-02-03 12:47:30 +0000515 assert(OpNo < Desc.NumOperands);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000516 unsigned OpType = Desc.OpInfo[OpNo].OperandType;
Matt Arsenault4bd72362016-12-10 00:39:12 +0000517 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
518 OpType <= AMDGPU::OPERAND_SRC_LAST;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000519}
520
521bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
Artem Tamazov43b61562017-02-03 12:47:30 +0000522 assert(OpNo < Desc.NumOperands);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000523 unsigned OpType = Desc.OpInfo[OpNo].OperandType;
Matt Arsenault4bd72362016-12-10 00:39:12 +0000524 switch (OpType) {
525 case AMDGPU::OPERAND_REG_IMM_FP32:
526 case AMDGPU::OPERAND_REG_IMM_FP64:
527 case AMDGPU::OPERAND_REG_IMM_FP16:
528 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
529 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
530 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
531 return true;
532 default:
533 return false;
534 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000535}
536
537bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
Artem Tamazov43b61562017-02-03 12:47:30 +0000538 assert(OpNo < Desc.NumOperands);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000539 unsigned OpType = Desc.OpInfo[OpNo].OperandType;
Matt Arsenault4bd72362016-12-10 00:39:12 +0000540 return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
541 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000542}
543
Krzysztof Parzyszekc8715502016-10-19 17:40:36 +0000544// Avoid using MCRegisterClass::getSize, since that function will go away
545// (move from MC* level to Target* level). Return size in bits.
Tom Stellardb133fbb2016-10-27 23:05:31 +0000546unsigned getRegBitWidth(unsigned RCID) {
547 switch (RCID) {
Krzysztof Parzyszekc8715502016-10-19 17:40:36 +0000548 case AMDGPU::SGPR_32RegClassID:
549 case AMDGPU::VGPR_32RegClassID:
550 case AMDGPU::VS_32RegClassID:
551 case AMDGPU::SReg_32RegClassID:
552 case AMDGPU::SReg_32_XM0RegClassID:
553 return 32;
554 case AMDGPU::SGPR_64RegClassID:
555 case AMDGPU::VS_64RegClassID:
556 case AMDGPU::SReg_64RegClassID:
557 case AMDGPU::VReg_64RegClassID:
558 return 64;
559 case AMDGPU::VReg_96RegClassID:
560 return 96;
561 case AMDGPU::SGPR_128RegClassID:
562 case AMDGPU::SReg_128RegClassID:
563 case AMDGPU::VReg_128RegClassID:
564 return 128;
565 case AMDGPU::SReg_256RegClassID:
566 case AMDGPU::VReg_256RegClassID:
567 return 256;
568 case AMDGPU::SReg_512RegClassID:
569 case AMDGPU::VReg_512RegClassID:
570 return 512;
571 default:
572 llvm_unreachable("Unexpected register class");
573 }
574}
575
Tom Stellardb133fbb2016-10-27 23:05:31 +0000576unsigned getRegBitWidth(const MCRegisterClass &RC) {
577 return getRegBitWidth(RC.getID());
578}
579
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000580unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
581 unsigned OpNo) {
Artem Tamazov43b61562017-02-03 12:47:30 +0000582 assert(OpNo < Desc.NumOperands);
Krzysztof Parzyszekc8715502016-10-19 17:40:36 +0000583 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
584 return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000585}
586
Matt Arsenault26faed32016-12-05 22:26:17 +0000587bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000588 if (Literal >= -16 && Literal <= 64)
589 return true;
590
Matt Arsenault26faed32016-12-05 22:26:17 +0000591 uint64_t Val = static_cast<uint64_t>(Literal);
592 return (Val == DoubleToBits(0.0)) ||
593 (Val == DoubleToBits(1.0)) ||
594 (Val == DoubleToBits(-1.0)) ||
595 (Val == DoubleToBits(0.5)) ||
596 (Val == DoubleToBits(-0.5)) ||
597 (Val == DoubleToBits(2.0)) ||
598 (Val == DoubleToBits(-2.0)) ||
599 (Val == DoubleToBits(4.0)) ||
600 (Val == DoubleToBits(-4.0)) ||
601 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000602}
603
Matt Arsenault26faed32016-12-05 22:26:17 +0000604bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000605 if (Literal >= -16 && Literal <= 64)
606 return true;
607
Matt Arsenault4bd72362016-12-10 00:39:12 +0000608 // The actual type of the operand does not seem to matter as long
609 // as the bits match one of the inline immediate values. For example:
610 //
611 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
612 // so it is a legal inline immediate.
613 //
614 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
615 // floating-point, so it is a legal inline immediate.
616
Matt Arsenault26faed32016-12-05 22:26:17 +0000617 uint32_t Val = static_cast<uint32_t>(Literal);
618 return (Val == FloatToBits(0.0f)) ||
619 (Val == FloatToBits(1.0f)) ||
620 (Val == FloatToBits(-1.0f)) ||
621 (Val == FloatToBits(0.5f)) ||
622 (Val == FloatToBits(-0.5f)) ||
623 (Val == FloatToBits(2.0f)) ||
624 (Val == FloatToBits(-2.0f)) ||
625 (Val == FloatToBits(4.0f)) ||
626 (Val == FloatToBits(-4.0f)) ||
627 (Val == 0x3e22f983 && HasInv2Pi);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000628}
629
/// \returns True if the 16-bit value \p Literal can be encoded as an inline
/// constant. f16 inline constants require VI+ (gated here on \p HasInv2Pi).
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  switch (static_cast<uint16_t>(Literal)) {
  case 0x3C00: // 1.0
  case 0xBC00: // -1.0
  case 0x3800: // 0.5
  case 0xB800: // -0.5
  case 0x4000: // 2.0
  case 0xC000: // -2.0
  case 0x4400: // 4.0
  case 0xC400: // -4.0
  case 0x3118: // 1/2pi
    return true;
  default:
    return false;
  }
}
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000648
Tom Stellard08efb7e2017-01-27 18:41:14 +0000649bool isUniformMMO(const MachineMemOperand *MMO) {
650 const Value *Ptr = MMO->getValue();
651 // UndefValue means this is a load of a kernel input. These are uniform.
652 // Sometimes LDS instructions have constant pointers.
653 // If Ptr is null, then that means this mem operand contains a
654 // PseudoSourceValue like GOT.
655 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
656 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
657 return true;
658
659 const Instruction *I = dyn_cast<Instruction>(Ptr);
660 return I && I->getMetadata("amdgpu.uniform");
661}
662
663int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
664 if (isSI(ST) || isCI(ST))
665 return ByteOffset >> 2;
666
667 return ByteOffset;
668}
669
670bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
671 int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
672 return isSI(ST) || isCI(ST) ? isUInt<8>(EncodedOffset) :
673 isUInt<20>(EncodedOffset);
674}
675
Eugene Zelenkod96089b2017-02-14 00:33:36 +0000676} // end namespace AMDGPU
677} // end namespace llvm