blob: e4eaaba9ec90c6de450ae7bc46f89d65db54c07b [file] [log] [blame]
Tom Stellard347ac792015-06-26 21:15:07 +00001//===-- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information--------------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9#include "AMDGPUBaseInfo.h"
Tom Stellarde3b5aea2015-12-02 17:00:42 +000010#include "AMDGPU.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000011#include "SIDefines.h"
Tom Stellard08efb7e2017-01-27 18:41:14 +000012#include "llvm/CodeGen/MachineMemOperand.h"
Tom Stellardac00eb52015-12-15 16:26:16 +000013#include "llvm/IR/LLVMContext.h"
Tom Stellard08efb7e2017-01-27 18:41:14 +000014#include "llvm/IR/Constants.h"
Tom Stellardac00eb52015-12-15 16:26:16 +000015#include "llvm/IR/Function.h"
Tom Stellarde3b5aea2015-12-02 17:00:42 +000016#include "llvm/IR/GlobalValue.h"
Tom Stellarde135ffd2015-09-25 21:41:28 +000017#include "llvm/MC/MCContext.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000018#include "llvm/MC/MCInstrInfo.h"
19#include "llvm/MC/MCRegisterInfo.h"
Tom Stellarde135ffd2015-09-25 21:41:28 +000020#include "llvm/MC/MCSectionELF.h"
Tom Stellard2b65ed32015-12-21 18:44:27 +000021#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard347ac792015-06-26 21:15:07 +000022#include "llvm/MC/SubtargetFeature.h"
23
24#define GET_SUBTARGETINFO_ENUM
25#include "AMDGPUGenSubtargetInfo.inc"
26#undef GET_SUBTARGETINFO_ENUM
27
Tom Stellard2b65ed32015-12-21 18:44:27 +000028#define GET_REGINFO_ENUM
29#include "AMDGPUGenRegisterInfo.inc"
30#undef GET_REGINFO_ENUM
31
Sam Koltona3ec5c12016-10-07 14:46:06 +000032#define GET_INSTRINFO_NAMED_OPS
33#define GET_INSTRINFO_ENUM
34#include "AMDGPUGenInstrInfo.inc"
35#undef GET_INSTRINFO_NAMED_OPS
36#undef GET_INSTRINFO_ENUM
37
namespace {

/// \returns Bit mask covering \p Width bits starting at bit \p Shift.
///
/// Uses an unsigned literal so the shift is done in unsigned arithmetic and
/// cannot overflow a signed int for large widths.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1u << Width) - 1) << Shift;
}

/// \brief Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// Bits of \p Dst outside the field are preserved; bits of \p Src that do
/// not fit in the field are discarded.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  unsigned Mask = getBitMask(Shift, Width);
  // Clear the field, then insert the (truncated) new value. The previous
  // version also cleared ~(1 << Shift) separately, which is redundant since
  // that bit is already inside the field mask.
  return (Dst & ~Mask) | ((Src << Shift) & Mask);
}

/// \brief Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}

/// \returns Vmcnt bit shift.
unsigned getVmcntBitShift() { return 0; }

/// \returns Vmcnt bit width.
unsigned getVmcntBitWidth() { return 4; }

/// \returns Expcnt bit shift.
unsigned getExpcntBitShift() { return 4; }

/// \returns Expcnt bit width.
unsigned getExpcntBitWidth() { return 3; }

/// \returns Lgkmcnt bit shift.
unsigned getLgkmcntBitShift() { return 8; }

/// \returns Lgkmcnt bit width.
unsigned getLgkmcntBitWidth() { return 4; }

} // anonymous namespace
80
Tom Stellard347ac792015-06-26 21:15:07 +000081namespace llvm {
82namespace AMDGPU {
83
84IsaVersion getIsaVersion(const FeatureBitset &Features) {
85
86 if (Features.test(FeatureISAVersion7_0_0))
87 return {7, 0, 0};
88
89 if (Features.test(FeatureISAVersion7_0_1))
90 return {7, 0, 1};
91
Yaxun Liu94add852016-10-26 16:37:56 +000092 if (Features.test(FeatureISAVersion7_0_2))
93 return {7, 0, 2};
94
Tom Stellard347ac792015-06-26 21:15:07 +000095 if (Features.test(FeatureISAVersion8_0_0))
96 return {8, 0, 0};
97
98 if (Features.test(FeatureISAVersion8_0_1))
99 return {8, 0, 1};
100
Changpeng Fang98317d22016-10-11 16:00:47 +0000101 if (Features.test(FeatureISAVersion8_0_2))
102 return {8, 0, 2};
103
Changpeng Fangc16be002016-01-13 20:39:25 +0000104 if (Features.test(FeatureISAVersion8_0_3))
105 return {8, 0, 3};
106
Yaxun Liu94add852016-10-26 16:37:56 +0000107 if (Features.test(FeatureISAVersion8_0_4))
108 return {8, 0, 4};
109
110 if (Features.test(FeatureISAVersion8_1_0))
111 return {8, 1, 0};
112
Tom Stellard347ac792015-06-26 21:15:07 +0000113 return {0, 0, 0};
114}
115
/// \brief Initializes \p Header with the default amd_kernel_code_t values
/// for the subtarget described by \p Features.
///
/// Every field is first zeroed, then the fields with non-zero defaults are
/// filled in; the machine version fields come from getIsaVersion().
void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const FeatureBitset &Features) {

  IsaVersion ISA = getIsaVersion(Features);

  // Zero everything so fields not set below get a well-defined default.
  memset(&Header, 0, sizeof(Header));

  // Version 1.0 of the amd_kernel_code_t structure.
  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 0;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = ISA.Major;
  Header.amd_machine_version_minor = ISA.Minor;
  Header.amd_machine_version_stepping = ISA.Stepping;
  // The kernel machine code immediately follows this header.
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value must
  // be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;
}
143
Tom Stellarde135ffd2015-09-25 21:41:28 +0000144MCSection *getHSATextSection(MCContext &Ctx) {
145 return Ctx.getELFSection(".hsatext", ELF::SHT_PROGBITS,
146 ELF::SHF_ALLOC | ELF::SHF_WRITE |
147 ELF::SHF_EXECINSTR |
148 ELF::SHF_AMDGPU_HSA_AGENT |
149 ELF::SHF_AMDGPU_HSA_CODE);
150}
151
Tom Stellard00f2f912015-12-02 19:47:57 +0000152MCSection *getHSADataGlobalAgentSection(MCContext &Ctx) {
153 return Ctx.getELFSection(".hsadata_global_agent", ELF::SHT_PROGBITS,
154 ELF::SHF_ALLOC | ELF::SHF_WRITE |
155 ELF::SHF_AMDGPU_HSA_GLOBAL |
156 ELF::SHF_AMDGPU_HSA_AGENT);
157}
158
159MCSection *getHSADataGlobalProgramSection(MCContext &Ctx) {
160 return Ctx.getELFSection(".hsadata_global_program", ELF::SHT_PROGBITS,
161 ELF::SHF_ALLOC | ELF::SHF_WRITE |
162 ELF::SHF_AMDGPU_HSA_GLOBAL);
163}
164
Tom Stellard9760f032015-12-03 03:34:32 +0000165MCSection *getHSARodataReadonlyAgentSection(MCContext &Ctx) {
166 return Ctx.getELFSection(".hsarodata_readonly_agent", ELF::SHT_PROGBITS,
167 ELF::SHF_ALLOC | ELF::SHF_AMDGPU_HSA_READONLY |
168 ELF::SHF_AMDGPU_HSA_AGENT);
169}
170
Tom Stellarde3b5aea2015-12-02 17:00:42 +0000171bool isGroupSegment(const GlobalValue *GV) {
172 return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
173}
174
Tom Stellard00f2f912015-12-02 19:47:57 +0000175bool isGlobalSegment(const GlobalValue *GV) {
176 return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
177}
178
179bool isReadOnlySegment(const GlobalValue *GV) {
180 return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS;
181}
182
Konstantin Zhuravlyov08326b62016-10-20 18:12:38 +0000183bool shouldEmitConstantsToTextSection(const Triple &TT) {
184 return TT.getOS() != Triple::AMDHSA;
185}
186
Matt Arsenault83002722016-05-12 02:45:18 +0000187int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
Marek Olsakfccabaf2016-01-13 11:45:36 +0000188 Attribute A = F.getFnAttribute(Name);
Matt Arsenault83002722016-05-12 02:45:18 +0000189 int Result = Default;
Tom Stellardac00eb52015-12-15 16:26:16 +0000190
191 if (A.isStringAttribute()) {
192 StringRef Str = A.getValueAsString();
Marek Olsakfccabaf2016-01-13 11:45:36 +0000193 if (Str.getAsInteger(0, Result)) {
Tom Stellardac00eb52015-12-15 16:26:16 +0000194 LLVMContext &Ctx = F.getContext();
Matt Arsenault83002722016-05-12 02:45:18 +0000195 Ctx.emitError("can't parse integer attribute " + Name);
Tom Stellardac00eb52015-12-15 16:26:16 +0000196 }
197 }
Matt Arsenault83002722016-05-12 02:45:18 +0000198
Marek Olsakfccabaf2016-01-13 11:45:36 +0000199 return Result;
200}
201
/// \brief Reads a comma-separated integer pair from string function
/// attribute \p Name of \p F.
///
/// \returns \p Default when the attribute is absent or malformed. When
/// \p OnlyFirstRequired is set, a missing second value is accepted and the
/// second element of \p Default is returned in its place.
std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    // An empty second field fails the parse above but is only an error when
    // the caller required both values.
    if (!OnlyFirstRequired || Strs.second.trim().size()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
226
/// \returns Mask covering all three wait-count fields (vmcnt, expcnt,
/// lgkmcnt) of the s_waitcnt immediate. \p Version is currently unused here:
/// the field layout comes from the fixed shift/width helpers above
/// (presumably kept for future ISA-dependent layouts -- TODO confirm).
unsigned getWaitcntBitMask(IsaVersion Version) {
  unsigned Vmcnt = getBitMask(getVmcntBitShift(), getVmcntBitWidth());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(), getLgkmcntBitWidth());
  return Vmcnt | Expcnt | Lgkmcnt;
}

/// \returns Maximum representable vmcnt value (unshifted field mask).
unsigned getVmcntBitMask(IsaVersion Version) {
  return (1 << getVmcntBitWidth()) - 1;
}

/// \returns Maximum representable expcnt value (unshifted field mask).
unsigned getExpcntBitMask(IsaVersion Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

/// \returns Maximum representable lgkmcnt value (unshifted field mask).
unsigned getLgkmcntBitMask(IsaVersion Version) {
  return (1 << getLgkmcntBitWidth()) - 1;
}
245
/// \returns Decoded vmcnt (bits [3:0]) of \p Waitcnt.
unsigned decodeVmcnt(IsaVersion Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getVmcntBitShift(), getVmcntBitWidth());
}

/// \returns Decoded expcnt (bits [6:4]) of \p Waitcnt.
unsigned decodeExpcnt(IsaVersion Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

/// \returns Decoded lgkmcnt (bits [11:8]) of \p Waitcnt.
unsigned decodeLgkmcnt(IsaVersion Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

/// \brief Decodes all three wait counts out of the s_waitcnt immediate
/// \p Waitcnt into \p Vmcnt, \p Expcnt and \p Lgkmcnt.
void decodeWaitcnt(IsaVersion Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}
264
/// \returns \p Waitcnt with its vmcnt field replaced by \p Vmcnt.
unsigned encodeVmcnt(IsaVersion Version, unsigned Waitcnt, unsigned Vmcnt) {
  return packBits(Vmcnt, Waitcnt, getVmcntBitShift(), getVmcntBitWidth());
}

/// \returns \p Waitcnt with its expcnt field replaced by \p Expcnt.
unsigned encodeExpcnt(IsaVersion Version, unsigned Waitcnt, unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

/// \returns \p Waitcnt with its lgkmcnt field replaced by \p Lgkmcnt.
unsigned encodeLgkmcnt(IsaVersion Version, unsigned Waitcnt, unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(), getLgkmcntBitWidth());
}

/// \brief Builds an s_waitcnt immediate from the three counts.
///
/// Starts from the full field mask, so any bits outside the three fields
/// are left set, then packs each count into its field in turn.
unsigned encodeWaitcnt(IsaVersion Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}
285
/// \returns The value of the "InitialPSInputAddr" function attribute of
/// \p F, or 0 when the attribute is absent or does not parse as an integer.
unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}
289
Nicolai Haehnledf3a20c2016-04-06 19:40:20 +0000290bool isShader(CallingConv::ID cc) {
291 switch(cc) {
292 case CallingConv::AMDGPU_VS:
293 case CallingConv::AMDGPU_GS:
294 case CallingConv::AMDGPU_PS:
295 case CallingConv::AMDGPU_CS:
296 return true;
297 default:
298 return false;
299 }
300}
301
302bool isCompute(CallingConv::ID cc) {
303 return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
304}
305
/// \returns True when \p STI has the SouthernIslands feature bit set.
bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

/// \returns True when \p STI has the SeaIslands feature bit set.
bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

/// \returns True when \p STI has the VolcanicIslands feature bit set.
bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}
317
/// \brief Maps the generic \p Reg to its subtarget-specific MC register.
///
/// FLAT_SCR and its LO/HI halves have distinct CI and VI encodings, so they
/// are remapped based on \p STI; all other registers are returned unchanged.
/// The asserts reject SI subtargets for these registers (presumably flat
/// scratch is unavailable there -- TODO confirm).
unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {

  switch(Reg) {
  default: break;
  case AMDGPU::FLAT_SCR:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_ci : AMDGPU::FLAT_SCR_vi;

  case AMDGPU::FLAT_SCR_LO:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_LO_ci : AMDGPU::FLAT_SCR_LO_vi;

  case AMDGPU::FLAT_SCR_HI:
    assert(!isSI(STI));
    return isCI(STI) ? AMDGPU::FLAT_SCR_HI_ci : AMDGPU::FLAT_SCR_HI_vi;
  }
  return Reg;
}
336
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000337bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
338 unsigned OpType = Desc.OpInfo[OpNo].OperandType;
Matt Arsenault4bd72362016-12-10 00:39:12 +0000339 return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
340 OpType <= AMDGPU::OPERAND_SRC_LAST;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000341}
342
/// \returns True when operand \p OpNo of \p Desc is a floating-point source
/// operand: a 16/32/64-bit FP immediate or FP inline-constant operand type.
bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    return true;
  default:
    return false;
  }
}
357
358bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
359 unsigned OpType = Desc.OpInfo[OpNo].OperandType;
Matt Arsenault4bd72362016-12-10 00:39:12 +0000360 return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
361 OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000362}
363
// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
/// \returns The register width in bits for register class \p RCID.
/// Aborts on a register class not listed here.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
    return 64;
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}
395
Tom Stellardb133fbb2016-10-27 23:05:31 +0000396unsigned getRegBitWidth(const MCRegisterClass &RC) {
397 return getRegBitWidth(RC.getID());
398}
399
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000400unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
401 unsigned OpNo) {
Krzysztof Parzyszekc8715502016-10-19 17:40:36 +0000402 unsigned RCID = Desc.OpInfo[OpNo].RegClass;
403 return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000404}
405
Matt Arsenault26faed32016-12-05 22:26:17 +0000406bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000407 if (Literal >= -16 && Literal <= 64)
408 return true;
409
Matt Arsenault26faed32016-12-05 22:26:17 +0000410 uint64_t Val = static_cast<uint64_t>(Literal);
411 return (Val == DoubleToBits(0.0)) ||
412 (Val == DoubleToBits(1.0)) ||
413 (Val == DoubleToBits(-1.0)) ||
414 (Val == DoubleToBits(0.5)) ||
415 (Val == DoubleToBits(-0.5)) ||
416 (Val == DoubleToBits(2.0)) ||
417 (Val == DoubleToBits(-2.0)) ||
418 (Val == DoubleToBits(4.0)) ||
419 (Val == DoubleToBits(-4.0)) ||
420 (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000421}
422
Matt Arsenault26faed32016-12-05 22:26:17 +0000423bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000424 if (Literal >= -16 && Literal <= 64)
425 return true;
426
Matt Arsenault4bd72362016-12-10 00:39:12 +0000427 // The actual type of the operand does not seem to matter as long
428 // as the bits match one of the inline immediate values. For example:
429 //
430 // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
431 // so it is a legal inline immediate.
432 //
433 // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
434 // floating-point, so it is a legal inline immediate.
435
Matt Arsenault26faed32016-12-05 22:26:17 +0000436 uint32_t Val = static_cast<uint32_t>(Literal);
437 return (Val == FloatToBits(0.0f)) ||
438 (Val == FloatToBits(1.0f)) ||
439 (Val == FloatToBits(-1.0f)) ||
440 (Val == FloatToBits(0.5f)) ||
441 (Val == FloatToBits(-0.5f)) ||
442 (Val == FloatToBits(2.0f)) ||
443 (Val == FloatToBits(-2.0f)) ||
444 (Val == FloatToBits(4.0f)) ||
445 (Val == FloatToBits(-4.0f)) ||
446 (Val == 0x3e22f983 && HasInv2Pi);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000447}
448
/// \returns True when the 16-bit value \p Literal can be encoded as an
/// inline constant. Requires \p HasInv2Pi; without it no f16 inline
/// constants are accepted at all.
bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  // Small integer immediates.
  if (Literal >= -16 && Literal <= 64)
    return true;

  // IEEE half-precision bit patterns of the inline FP constants.
  switch (static_cast<uint16_t>(Literal)) {
  case 0x3C00: // 1.0
  case 0xBC00: // -1.0
  case 0x3800: // 0.5
  case 0xB800: // -0.5
  case 0x4000: // 2.0
  case 0xC000: // -2.0
  case 0x4400: // 4.0
  case 0xC400: // -4.0
  case 0x3118: // 1/2pi
    return true;
  default:
    return false;
  }
}
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000467
Tom Stellard08efb7e2017-01-27 18:41:14 +0000468bool isUniformMMO(const MachineMemOperand *MMO) {
469 const Value *Ptr = MMO->getValue();
470 // UndefValue means this is a load of a kernel input. These are uniform.
471 // Sometimes LDS instructions have constant pointers.
472 // If Ptr is null, then that means this mem operand contains a
473 // PseudoSourceValue like GOT.
474 if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
475 isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
476 return true;
477
478 const Instruction *I = dyn_cast<Instruction>(Ptr);
479 return I && I->getMetadata("amdgpu.uniform");
480}
481
482int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
483 if (isSI(ST) || isCI(ST))
484 return ByteOffset >> 2;
485
486 return ByteOffset;
487}
488
489bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
490 int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
491 return isSI(ST) || isCI(ST) ? isUInt<8>(EncodedOffset) :
492 isUInt<20>(EncodedOffset);
493}
494
Tom Stellard347ac792015-06-26 21:15:07 +0000495} // End namespace AMDGPU
496} // End namespace llvm