//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
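
// Worked example (illustrative only): with Shift = 4 and Width = 3,
// getBitMask(4, 3) == 0x70, packBits(3, 0, 4, 3) == 0x30, and
// unpackBits(0x30, 4, 3) == 3.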

/// \returns Vmcnt bit shift (lower bits).
unsigned getVmcntBitShiftLo() { return 0; }

/// \returns Vmcnt bit width (lower bits).
unsigned getVmcntBitWidthLo() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

/// \returns Vmcnt bit shift (higher bits).
unsigned getVmcntBitShiftHi() { return 14; }

/// \returns Vmcnt bit width (higher bits).
unsigned getVmcntBitWidthHi() { return 2; }

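// Sketch of the s_waitcnt field layout implied by the helpers above
// (illustrative; consult the ISA documentation for each generation):
//   bits [3:0]   vmcnt (lower bits)
//   bits [6:4]   expcnt
//   bits [11:8]  lgkmcnt
//   bits [15:14] vmcnt (upper bits, GFX9+ only)
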
} // end namespace anonymous

namespace llvm {

static cl::opt<bool> EnablePackedInlinableLiterals(
  "enable-packed-inlinable-literals",
  cl::desc("Enable packed inlinable literals (v2f16, v2i16)"),
  cl::init(false));

namespace AMDGPU {

namespace IsaInfo {

IsaVersion getIsaVersion(const FeatureBitset &Features) {
  // CI.
  if (Features.test(FeatureISAVersion7_0_0))
    return {7, 0, 0};
  if (Features.test(FeatureISAVersion7_0_1))
    return {7, 0, 1};
  if (Features.test(FeatureISAVersion7_0_2))
    return {7, 0, 2};

  // VI.
  if (Features.test(FeatureISAVersion8_0_0))
    return {8, 0, 0};
  if (Features.test(FeatureISAVersion8_0_1))
    return {8, 0, 1};
  if (Features.test(FeatureISAVersion8_0_2))
    return {8, 0, 2};
  if (Features.test(FeatureISAVersion8_0_3))
    return {8, 0, 3};
  if (Features.test(FeatureISAVersion8_0_4))
    return {8, 0, 4};
  if (Features.test(FeatureISAVersion8_1_0))
    return {8, 1, 0};

  // GFX9.
  if (Features.test(FeatureISAVersion9_0_0))
    return {9, 0, 0};
  if (Features.test(FeatureISAVersion9_0_1))
    return {9, 0, 1};

  if (!Features.test(FeatureGCN) || Features.test(FeatureSouthernIslands))
    return {0, 0, 0};
  return {7, 0, 0};
}

unsigned getWavefrontSize(const FeatureBitset &Features) {
  if (Features.test(FeatureWavefrontSize16))
    return 16;
  if (Features.test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const FeatureBitset &Features) {
  if (Features.test(FeatureLocalMemorySize32768))
    return 32768;
  if (Features.test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const FeatureBitset &Features) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const FeatureBitset &Features,
                               unsigned FlatWorkGroupSize) {
  if (!Features.test(FeatureGCN))
    return 8;
  unsigned N = getWavesPerWorkGroup(Features, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features) {
  return getMaxWavesPerEU(Features) * getEUsPerCU(Features);
}

unsigned getMaxWavesPerCU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(Features, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features) {
  if (!Features.test(FeatureGCN))
    return 8;
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const FeatureBitset &Features,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(Features, FlatWorkGroupSize),
                 getEUsPerCU(Features)) / getEUsPerCU(Features);
}

unsigned getMinFlatWorkGroupSize(const FeatureBitset &Features) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const FeatureBitset &Features) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const FeatureBitset &Features,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(Features)) /
         getWavefrontSize(Features);
}
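
// Illustrative example (assuming a wave64 target): a flat work group size of
// 129 rounds up to 192 lanes, so getWavesPerWorkGroup returns 3 waves.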

unsigned getSGPRAllocGranule(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const FeatureBitset &Features) {
  return 8;
}

unsigned getTotalNumSGPRs(const FeatureBitset &Features) {
  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const FeatureBitset &Features) {
  if (Features.test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(Features);
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumSGPRs =
    alignDown(getTotalNumSGPRs(Features) / (WavesPerEU + 1),
              getSGPRAllocGranule(Features)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(Features));
}

unsigned getMaxNumSGPRs(const FeatureBitset &Features, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(Features);
  unsigned MaxNumSGPRs = alignDown(getTotalNumSGPRs(Features) / WavesPerEU,
                                   getSGPRAllocGranule(Features));
  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(Features);
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}
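
// Worked example (illustrative, not normative): on a Version.Major >= 8 target
// with Addressable == true and WavesPerEU == 10, this computes
// alignDown(800 / 10, 16) == 80 SGPRs, below the 102 addressable SGPRs.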

unsigned getVGPRAllocGranule(const FeatureBitset &Features) {
  return 4;
}

unsigned getVGPREncodingGranule(const FeatureBitset &Features) {
  return getVGPRAllocGranule(Features);
}

unsigned getTotalNumVGPRs(const FeatureBitset &Features) {
  return 256;
}

unsigned getAddressableNumVGPRs(const FeatureBitset &Features) {
  return getTotalNumVGPRs(Features);
}

unsigned getMinNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU(Features))
    return 0;
  unsigned MinNumVGPRs =
    alignDown(getTotalNumVGPRs(Features) / (WavesPerEU + 1),
              getVGPRAllocGranule(Features)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(Features));
}

unsigned getMaxNumVGPRs(const FeatureBitset &Features, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(Features) / WavesPerEU,
                                   getVGPRAllocGranule(Features));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(Features);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}
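
// Similarly (illustrative only): with 256 total VGPRs, a granule of 4, and
// WavesPerEU == 10, getMaxNumVGPRs yields alignDown(256 / 10, 4) == 24 VGPRs.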

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const FeatureBitset &Features) {
  IsaInfo::IsaVersion ISA = IsaInfo::getIsaVersion(Features);

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 1;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = ISA.Major;
  Header.amd_machine_version_minor = ISA.Minor;
  Header.amd_machine_version_stepping = ISA.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value
  // must be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}

bool isGroupSegment(const GlobalValue *GV, AMDGPUAS AS) {
  return GV->getType()->getAddressSpace() == AS.LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV, AMDGPUAS AS) {
  return GV->getType()->getAddressSpace() == AS.GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV, AMDGPUAS AS) {
  return GV->getType()->getAddressSpace() == AS.CONSTANT_ADDRESS;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
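
// For example, a function attribute "amdgpu-flat-work-group-size"="128,256"
// parses to the pair {128, 256}; with OnlyFirstRequired set, "128" alone is
// also accepted and the second element keeps its default value.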

unsigned getVmcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaInfo::IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}

unsigned getWaitcntBitMask(const IsaInfo::IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

void decodeWaitcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

unsigned encodeVmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaInfo::IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

unsigned encodeWaitcnt(const IsaInfo::IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
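
// Usage sketch (illustrative): encodeWaitcnt(Version, 1, 2, 3) starts from
// getWaitcntBitMask(Version) and packs Vmcnt = 1, Expcnt = 2, Lgkmcnt = 3 into
// their fields; decodeWaitcnt recovers the same three values from the result.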

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_HS:
    return true;
  default:
    return false;
  }
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
    Reg == AMDGPU::SCC;
}

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  switch(Reg) {
  default: break;
  case AMDGPU::FLAT_SCR:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;

  case AMDGPU::FLAT_SCR_LO:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;

  case AMDGPU::FLAT_SCR_HI:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
  }
  return Reg;
}
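
// For instance, asking for AMDGPU::FLAT_SCR on a VI subtarget returns
// AMDGPU::FLAT_SCR_vi, on CI it returns AMDGPU::FLAT_SCR_ci, and registers
// without subtarget-specific variants are returned unchanged.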

unsigned mc2PseudoReg(unsigned Reg) {
  switch (Reg) {
  case AMDGPU::FLAT_SCR_ci:
  case AMDGPU::FLAT_SCR_vi:
    return AMDGPU::FLAT_SCR;

  case AMDGPU::FLAT_SCR_LO_ci:
  case AMDGPU::FLAT_SCR_LO_vi:
    return AMDGPU::FLAT_SCR_LO;

  case AMDGPU::FLAT_SCR_HI_ci:
  case AMDGPU::FLAT_SCR_HI_vi:
    return AMDGPU::FLAT_SCR_HI;

  default:
    return Reg;
  }
}

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}
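
// As a concrete illustration: the bit pattern 0xC0800000 (-4.0f) is an inline
// immediate, whereas 0x40490FDB (roughly pi as a float) is not, since it
// matches neither the small-integer range nor any entry in the list above.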

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (!EnablePackedInlinableLiterals)
    return false;

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}

bool isUniformMMO(const MachineMemOperand *MMO) {
  const Value *Ptr = MMO->getValue();
  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (isSI(ST) || isCI(ST))
    return ByteOffset >> 2;

  return ByteOffset;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return isSI(ST) || isCI(ST) ? isUInt<8>(EncodedOffset) :
                                isUInt<20>(EncodedOffset);
}
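
// Rough example: on SI/CI an SMRD immediate is a dword offset, so a byte
// offset of 400 encodes as 100 and must fit in 8 bits; on VI and later the
// byte offset is used directly and must fit in 20 bits.
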
} // end namespace AMDGPU

} // end namespace llvm

const unsigned AMDGPUAS::MAX_COMMON_ADDRESS;
const unsigned AMDGPUAS::GLOBAL_ADDRESS;
const unsigned AMDGPUAS::LOCAL_ADDRESS;
const unsigned AMDGPUAS::PARAM_D_ADDRESS;
const unsigned AMDGPUAS::PARAM_I_ADDRESS;
const unsigned AMDGPUAS::CONSTANT_BUFFER_0;
const unsigned AMDGPUAS::CONSTANT_BUFFER_1;
const unsigned AMDGPUAS::CONSTANT_BUFFER_2;
const unsigned AMDGPUAS::CONSTANT_BUFFER_3;
const unsigned AMDGPUAS::CONSTANT_BUFFER_4;
const unsigned AMDGPUAS::CONSTANT_BUFFER_5;
const unsigned AMDGPUAS::CONSTANT_BUFFER_6;
const unsigned AMDGPUAS::CONSTANT_BUFFER_7;
const unsigned AMDGPUAS::CONSTANT_BUFFER_8;
const unsigned AMDGPUAS::CONSTANT_BUFFER_9;
const unsigned AMDGPUAS::CONSTANT_BUFFER_10;
const unsigned AMDGPUAS::CONSTANT_BUFFER_11;
const unsigned AMDGPUAS::CONSTANT_BUFFER_12;
const unsigned AMDGPUAS::CONSTANT_BUFFER_13;
const unsigned AMDGPUAS::CONSTANT_BUFFER_14;
const unsigned AMDGPUAS::CONSTANT_BUFFER_15;
const unsigned AMDGPUAS::UNKNOWN_ADDRESS_SPACE;

namespace llvm {
namespace AMDGPU {

AMDGPUAS getAMDGPUAS(Triple T) {
  auto Env = T.getEnvironmentName();
  AMDGPUAS AS;
  if (Env == "amdgiz" || Env == "amdgizcl") {
    AS.FLAT_ADDRESS = 0;
    AS.PRIVATE_ADDRESS = 5;
    AS.REGION_ADDRESS = 4;
  } else {
    AS.FLAT_ADDRESS = 4;
    AS.PRIVATE_ADDRESS = 0;
    AS.REGION_ADDRESS = 5;
  }
  return AS;
}

AMDGPUAS getAMDGPUAS(const TargetMachine &M) {
  return getAMDGPUAS(M.getTargetTriple());
}

AMDGPUAS getAMDGPUAS(const Module &M) {
  return getAMDGPUAS(Triple(M.getTargetTriple()));
}
} // namespace AMDGPU
} // namespace llvm