//===- AMDGPUBaseInfo.cpp - AMDGPU Base encoding information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDGPUTargetTransformInfo.h"
#include "SIDefines.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"

#define GET_INSTRINFO_NAMED_OPS
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"
#undef GET_INSTRMAP_INFO
#undef GET_INSTRINFO_NAMED_OPS

namespace {

/// \returns Bit mask for given bit \p Shift and bit \p Width.
unsigned getBitMask(unsigned Shift, unsigned Width) {
  return ((1 << Width) - 1) << Shift;
}

/// Packs \p Src into \p Dst for given bit \p Shift and bit \p Width.
///
/// \returns Packed \p Dst.
unsigned packBits(unsigned Src, unsigned Dst, unsigned Shift, unsigned Width) {
  Dst &= ~(1 << Shift) & ~getBitMask(Shift, Width);
  Dst |= (Src << Shift) & getBitMask(Shift, Width);
  return Dst;
}

/// Unpacks bits from \p Src for given bit \p Shift and bit \p Width.
///
/// \returns Unpacked bits.
unsigned unpackBits(unsigned Src, unsigned Shift, unsigned Width) {
  return (Src & getBitMask(Shift, Width)) >> Shift;
}
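
// Worked example (illustrative comment, not part of the original file): with
// Shift = 8 and Width = 4, getBitMask(8, 4) is 0xF00, so
// packBits(0x5, 0x1234, 8, 4) clears bits [11:8] of 0x1234 (giving 0x1034)
// and ORs in 0x500, yielding 0x1534; unpackBits(0x1534, 8, 4) recovers 0x5.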
70
Matt Arsenaulte823d922017-02-18 18:29:53 +000071/// \returns Vmcnt bit shift (lower bits).
72unsigned getVmcntBitShiftLo() { return 0; }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +000073
Matt Arsenaulte823d922017-02-18 18:29:53 +000074/// \returns Vmcnt bit width (lower bits).
75unsigned getVmcntBitWidthLo() { return 4; }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +000076
77/// \returns Expcnt bit shift.
78unsigned getExpcntBitShift() { return 4; }
79
80/// \returns Expcnt bit width.
81unsigned getExpcntBitWidth() { return 3; }
82
83/// \returns Lgkmcnt bit shift.
84unsigned getLgkmcntBitShift() { return 8; }
85
86/// \returns Lgkmcnt bit width.
Stanislav Mekhanoshin956b0be2019-04-25 18:53:41 +000087unsigned getLgkmcntBitWidth(unsigned VersionMajor) {
88 return (VersionMajor >= 10) ? 6 : 4;
89}
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +000090
Matt Arsenaulte823d922017-02-18 18:29:53 +000091/// \returns Vmcnt bit shift (higher bits).
92unsigned getVmcntBitShiftHi() { return 14; }
93
94/// \returns Vmcnt bit width (higher bits).
95unsigned getVmcntBitWidthHi() { return 2; }
96
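// Taken together, the helpers above describe the s_waitcnt immediate layout
// (a summary comment, not in the original file): vmcnt low in bits [3:0],
// expcnt in bits [6:4], lgkmcnt starting at bit 8 (4 bits wide, or 6 bits on
// gfx10+), and the two high vmcnt bits in bits [15:14] on gfx9+.
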
} // end anonymous namespace

namespace llvm {

namespace AMDGPU {

#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
#define GET_MIMGLZMappingTable_IMPL
#define GET_MIMGMIPMappingTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMIMGOpcode(unsigned BaseOpcode, unsigned MIMGEncoding,
                  unsigned VDataDwords, unsigned VAddrDwords) {
  const MIMGInfo *Info = getMIMGOpcodeHelper(BaseOpcode, MIMGEncoding,
                                             VDataDwords, VAddrDwords);
  return Info ? Info->Opcode : -1;
}

const MIMGBaseOpcodeInfo *getMIMGBaseOpcode(unsigned Opc) {
  const MIMGInfo *Info = getMIMGInfo(Opc);
  return Info ? getMIMGBaseOpcodeInfo(Info->BaseOpcode) : nullptr;
}

int getMaskedMIMGOp(unsigned Opc, unsigned NewChannels) {
  const MIMGInfo *OrigInfo = getMIMGInfo(Opc);
  const MIMGInfo *NewInfo =
      getMIMGOpcodeHelper(OrigInfo->BaseOpcode, OrigInfo->MIMGEncoding,
                          NewChannels, OrigInfo->VAddrDwords);
  return NewInfo ? NewInfo->Opcode : -1;
}

struct MUBUFInfo {
  uint16_t Opcode;
  uint16_t BaseOpcode;
  uint8_t dwords;
  bool has_vaddr;
  bool has_srsrc;
  bool has_soffset;
};

#define GET_MUBUFInfoTable_DECL
#define GET_MUBUFInfoTable_IMPL
#include "AMDGPUGenSearchableTables.inc"

int getMUBUFBaseOpcode(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFInfoFromOpcode(Opc);
  return Info ? Info->BaseOpcode : -1;
}

int getMUBUFOpcode(unsigned BaseOpc, unsigned Dwords) {
  const MUBUFInfo *Info = getMUBUFInfoFromBaseOpcodeAndDwords(BaseOpc, Dwords);
  return Info ? Info->Opcode : -1;
}

int getMUBUFDwords(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->dwords : 0;
}

bool getMUBUFHasVAddr(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_vaddr : false;
}

bool getMUBUFHasSrsrc(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_srsrc : false;
}

bool getMUBUFHasSoffset(unsigned Opc) {
  const MUBUFInfo *Info = getMUBUFOpcodeHelper(Opc);
  return Info ? Info->has_soffset : false;
}

// Wrapper for Tablegen'd function. enum Subtarget is not defined in any
// header files, so we need to wrap it in a function that takes unsigned
// instead.
int getMCOpcode(uint16_t Opcode, unsigned Gen) {
  return getMCOpcodeGen(Opcode, static_cast<Subtarget>(Gen));
}

namespace IsaInfo {

void streamIsaVersion(const MCSubtargetInfo *STI, raw_ostream &Stream) {
  auto TargetTriple = STI->getTargetTriple();
  auto Version = getIsaVersion(STI->getCPU());

  Stream << TargetTriple.getArchName() << '-'
         << TargetTriple.getVendorName() << '-'
         << TargetTriple.getOSName() << '-'
         << TargetTriple.getEnvironmentName() << '-'
         << "gfx"
         << Version.Major
         << Version.Minor
         << Version.Stepping;

  if (hasXNACK(*STI))
    Stream << "+xnack";
  if (hasSRAMECC(*STI))
    Stream << "+sram-ecc";

  Stream.flush();
}
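
// For instance (illustrative, not from the original file), an
// amdgcn-amd-amdhsa triple with CPU gfx906 and FeatureSRAMECC enabled would
// stream something like "amdgcn-amd-amdhsa--gfx906+sram-ecc"; the empty
// environment component leaves the doubled '-'.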

bool hasCodeObjectV3(const MCSubtargetInfo *STI) {
  return STI->getTargetTriple().getOS() == Triple::AMDHSA &&
         STI->getFeatureBits().test(FeatureCodeObjectV3);
}

unsigned getWavefrontSize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureWavefrontSize16))
    return 16;
  if (STI->getFeatureBits().test(FeatureWavefrontSize32))
    return 32;

  return 64;
}

unsigned getLocalMemorySize(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureLocalMemorySize32768))
    return 32768;
  if (STI->getFeatureBits().test(FeatureLocalMemorySize65536))
    return 65536;

  return 0;
}

unsigned getEUsPerCU(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getMaxWorkGroupsPerCU(const MCSubtargetInfo *STI,
                               unsigned FlatWorkGroupSize) {
  assert(FlatWorkGroupSize != 0);
  if (STI->getTargetTriple().getArch() != Triple::amdgcn)
    return 8;
  unsigned N = getWavesPerWorkGroup(STI, FlatWorkGroupSize);
  if (N == 1)
    return 40;
  N = 40 / N;
  return std::min(N, 16u);
}
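
// Example (illustrative, not from the original file): on a wave64 target with
// FlatWorkGroupSize = 256, getWavesPerWorkGroup returns 4, so N = 40 / 4 = 10
// work-groups per CU; a single-wave work-group instead hits the N == 1 case
// and returns 40.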

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI) {
  return getMaxWavesPerEU() * getEUsPerCU(STI);
}

unsigned getMaxWavesPerCU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return getWavesPerWorkGroup(STI, FlatWorkGroupSize);
}

unsigned getMinWavesPerEU(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxWavesPerEU() {
  // FIXME: Need to take scratch memory into account.
  return 10;
}

unsigned getMaxWavesPerEU(const MCSubtargetInfo *STI,
                          unsigned FlatWorkGroupSize) {
  return alignTo(getMaxWavesPerCU(STI, FlatWorkGroupSize),
                 getEUsPerCU(STI)) / getEUsPerCU(STI);
}

unsigned getMinFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 1;
}

unsigned getMaxFlatWorkGroupSize(const MCSubtargetInfo *STI) {
  return 2048;
}

unsigned getWavesPerWorkGroup(const MCSubtargetInfo *STI,
                              unsigned FlatWorkGroupSize) {
  return alignTo(FlatWorkGroupSize, getWavefrontSize(STI)) /
         getWavefrontSize(STI);
}

unsigned getSGPRAllocGranule(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return getAddressableNumSGPRs(STI);
  if (Version.Major >= 8)
    return 16;
  return 8;
}

unsigned getSGPREncodingGranule(const MCSubtargetInfo *STI) {
  return 8;
}

unsigned getTotalNumSGPRs(const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 8)
    return 800;
  return 512;
}

unsigned getAddressableNumSGPRs(const MCSubtargetInfo *STI) {
  if (STI->getFeatureBits().test(FeatureSGPRInitBug))
    return FIXED_NUM_SGPRS_FOR_INIT_BUG;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 106;
  if (Version.Major >= 8)
    return 102;
  return 104;
}

unsigned getMinNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return 0;

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;

  unsigned MinNumSGPRs = getTotalNumSGPRs(STI) / (WavesPerEU + 1);
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MinNumSGPRs -= std::min(MinNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MinNumSGPRs = alignDown(MinNumSGPRs, getSGPRAllocGranule(STI)) + 1;
  return std::min(MinNumSGPRs, getAddressableNumSGPRs(STI));
}

unsigned getMaxNumSGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU,
                        bool Addressable) {
  assert(WavesPerEU != 0);

  unsigned AddressableNumSGPRs = getAddressableNumSGPRs(STI);
  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return Addressable ? AddressableNumSGPRs : 108;
  if (Version.Major >= 8 && !Addressable)
    AddressableNumSGPRs = 112;
  unsigned MaxNumSGPRs = getTotalNumSGPRs(STI) / WavesPerEU;
  if (STI->getFeatureBits().test(FeatureTrapHandler))
    MaxNumSGPRs -= std::min(MaxNumSGPRs, (unsigned)TRAP_NUM_SGPRS);
  MaxNumSGPRs = alignDown(MaxNumSGPRs, getSGPRAllocGranule(STI));
  return std::min(MaxNumSGPRs, AddressableNumSGPRs);
}
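
// Example (illustrative, not from the original file): on a gfx9 target without
// the trap handler, getMaxNumSGPRs at WavesPerEU = 8 computes
// alignDown(800 / 8, 16) = 96, which is below the 102 addressable SGPRs, so
// the result is 96.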

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed) {
  unsigned ExtraSGPRs = 0;
  if (VCCUsed)
    ExtraSGPRs = 2;

  IsaVersion Version = getIsaVersion(STI->getCPU());
  if (Version.Major >= 10)
    return ExtraSGPRs;

  if (Version.Major < 8) {
    if (FlatScrUsed)
      ExtraSGPRs = 4;
  } else {
    if (XNACKUsed)
      ExtraSGPRs = 4;

    if (FlatScrUsed)
      ExtraSGPRs = 6;
  }

  return ExtraSGPRs;
}

unsigned getNumExtraSGPRs(const MCSubtargetInfo *STI, bool VCCUsed,
                          bool FlatScrUsed) {
  return getNumExtraSGPRs(STI, VCCUsed, FlatScrUsed,
                          STI->getFeatureBits().test(AMDGPU::FeatureXNACK));
}

unsigned getNumSGPRBlocks(const MCSubtargetInfo *STI, unsigned NumSGPRs) {
  NumSGPRs = alignTo(std::max(1u, NumSGPRs), getSGPREncodingGranule(STI));
  // SGPRBlocks is actual number of SGPR blocks minus 1.
  return NumSGPRs / getSGPREncodingGranule(STI) - 1;
}
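
// Example (illustrative, not from the original file): with the 8-SGPR encoding
// granule, NumSGPRs = 37 rounds up to 40 and encodes as 40 / 8 - 1 = 4 blocks.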

unsigned getVGPRAllocGranule(const MCSubtargetInfo *STI) {
  return 4;
}

unsigned getVGPREncodingGranule(const MCSubtargetInfo *STI) {
  return getVGPRAllocGranule(STI);
}

unsigned getTotalNumVGPRs(const MCSubtargetInfo *STI) {
  return 256;
}

unsigned getAddressableNumVGPRs(const MCSubtargetInfo *STI) {
  return getTotalNumVGPRs(STI);
}

unsigned getMinNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  if (WavesPerEU >= getMaxWavesPerEU())
    return 0;
  unsigned MinNumVGPRs =
      alignDown(getTotalNumVGPRs(STI) / (WavesPerEU + 1),
                getVGPRAllocGranule(STI)) + 1;
  return std::min(MinNumVGPRs, getAddressableNumVGPRs(STI));
}

unsigned getMaxNumVGPRs(const MCSubtargetInfo *STI, unsigned WavesPerEU) {
  assert(WavesPerEU != 0);

  unsigned MaxNumVGPRs = alignDown(getTotalNumVGPRs(STI) / WavesPerEU,
                                   getVGPRAllocGranule(STI));
  unsigned AddressableNumVGPRs = getAddressableNumVGPRs(STI);
  return std::min(MaxNumVGPRs, AddressableNumVGPRs);
}

unsigned getNumVGPRBlocks(const MCSubtargetInfo *STI, unsigned NumVGPRs) {
  NumVGPRs = alignTo(std::max(1u, NumVGPRs), getVGPREncodingGranule(STI));
  // VGPRBlocks is actual number of VGPR blocks minus 1.
  return NumVGPRs / getVGPREncodingGranule(STI) - 1;
}

} // end namespace IsaInfo

void initDefaultAMDKernelCodeT(amd_kernel_code_t &Header,
                               const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  memset(&Header, 0, sizeof(Header));

  Header.amd_kernel_code_version_major = 1;
  Header.amd_kernel_code_version_minor = 2;
  Header.amd_machine_kind = 1; // AMD_MACHINE_KIND_AMDGPU
  Header.amd_machine_version_major = Version.Major;
  Header.amd_machine_version_minor = Version.Minor;
  Header.amd_machine_version_stepping = Version.Stepping;
  Header.kernel_code_entry_byte_offset = sizeof(Header);
  // wavefront_size is specified as a power of 2: 2^6 = 64 threads.
  Header.wavefront_size = 6;

  // If the code object does not support indirect functions, then the value must
  // be 0xffffffff.
  Header.call_convention = -1;

  // These alignment values are specified in powers of two, so alignment =
  // 2^n. The minimum alignment is 2^4 = 16.
  Header.kernarg_segment_alignment = 4;
  Header.group_segment_alignment = 4;
  Header.private_segment_alignment = 4;

  if (Version.Major >= 10) {
    Header.compute_pgm_resource_registers |=
      S_00B848_WGP_MODE(STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1) |
      S_00B848_MEM_ORDERED(1);
  }
}

amdhsa::kernel_descriptor_t getDefaultAmdhsaKernelDescriptor(
    const MCSubtargetInfo *STI) {
  IsaVersion Version = getIsaVersion(STI->getCPU());

  amdhsa::kernel_descriptor_t KD;
  memset(&KD, 0, sizeof(KD));

  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64,
                  amdhsa::FLOAT_DENORM_MODE_FLUSH_NONE);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                  amdhsa::COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE, 1);
  AMDHSA_BITS_SET(KD.compute_pgm_rsrc2,
                  amdhsa::COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, 1);
  if (Version.Major >= 10) {
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_WGP_MODE,
                    STI->getFeatureBits().test(FeatureCuMode) ? 0 : 1);
    AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
                    amdhsa::COMPUTE_PGM_RSRC1_MEM_ORDERED, 1);
  }
  return KD;
}

bool isGroupSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS;
}

bool isGlobalSegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
}

bool isReadOnlySegment(const GlobalValue *GV) {
  return GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS ||
         GV->getType()->getAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT;
}

bool shouldEmitConstantsToTextSection(const Triple &TT) {
  return TT.getOS() != Triple::AMDHSA;
}

int getIntegerAttribute(const Function &F, StringRef Name, int Default) {
  Attribute A = F.getFnAttribute(Name);
  int Result = Default;

  if (A.isStringAttribute()) {
    StringRef Str = A.getValueAsString();
    if (Str.getAsInteger(0, Result)) {
      LLVMContext &Ctx = F.getContext();
      Ctx.emitError("can't parse integer attribute " + Name);
    }
  }

  return Result;
}

std::pair<int, int> getIntegerPairAttribute(const Function &F,
                                            StringRef Name,
                                            std::pair<int, int> Default,
                                            bool OnlyFirstRequired) {
  Attribute A = F.getFnAttribute(Name);
  if (!A.isStringAttribute())
    return Default;

  LLVMContext &Ctx = F.getContext();
  std::pair<int, int> Ints = Default;
  std::pair<StringRef, StringRef> Strs = A.getValueAsString().split(',');
  if (Strs.first.trim().getAsInteger(0, Ints.first)) {
    Ctx.emitError("can't parse first integer attribute " + Name);
    return Default;
  }
  if (Strs.second.trim().getAsInteger(0, Ints.second)) {
    if (!OnlyFirstRequired || !Strs.second.trim().empty()) {
      Ctx.emitError("can't parse second integer attribute " + Name);
      return Default;
    }
  }

  return Ints;
}
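
// For instance (illustrative, not from the original file), a function carrying
// "amdgpu-flat-work-group-size"="1,256" parses to the pair (1, 256); with
// OnlyFirstRequired set, a bare "64" yields (64, Default.second) since the
// empty second component is tolerated.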

unsigned getVmcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = (1 << getVmcntBitWidthLo()) - 1;
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi = ((1 << getVmcntBitWidthHi()) - 1) << getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned getExpcntBitMask(const IsaVersion &Version) {
  return (1 << getExpcntBitWidth()) - 1;
}

unsigned getLgkmcntBitMask(const IsaVersion &Version) {
  return (1 << getLgkmcntBitWidth(Version.Major)) - 1;
}

unsigned getWaitcntBitMask(const IsaVersion &Version) {
  unsigned VmcntLo = getBitMask(getVmcntBitShiftLo(), getVmcntBitWidthLo());
  unsigned Expcnt = getBitMask(getExpcntBitShift(), getExpcntBitWidth());
  unsigned Lgkmcnt = getBitMask(getLgkmcntBitShift(),
                                getLgkmcntBitWidth(Version.Major));
  unsigned Waitcnt = VmcntLo | Expcnt | Lgkmcnt;
  if (Version.Major < 9)
    return Waitcnt;

  unsigned VmcntHi = getBitMask(getVmcntBitShiftHi(), getVmcntBitWidthHi());
  return Waitcnt | VmcntHi;
}

unsigned decodeVmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  unsigned VmcntLo =
      unpackBits(Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return VmcntLo;

  unsigned VmcntHi =
      unpackBits(Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
  VmcntHi <<= getVmcntBitWidthLo();
  return VmcntLo | VmcntHi;
}

unsigned decodeExpcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned decodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt) {
  return unpackBits(Waitcnt, getLgkmcntBitShift(),
                    getLgkmcntBitWidth(Version.Major));
}

void decodeWaitcnt(const IsaVersion &Version, unsigned Waitcnt,
                   unsigned &Vmcnt, unsigned &Expcnt, unsigned &Lgkmcnt) {
  Vmcnt = decodeVmcnt(Version, Waitcnt);
  Expcnt = decodeExpcnt(Version, Waitcnt);
  Lgkmcnt = decodeLgkmcnt(Version, Waitcnt);
}

Waitcnt decodeWaitcnt(const IsaVersion &Version, unsigned Encoded) {
  Waitcnt Decoded;
  Decoded.VmCnt = decodeVmcnt(Version, Encoded);
  Decoded.ExpCnt = decodeExpcnt(Version, Encoded);
  Decoded.LgkmCnt = decodeLgkmcnt(Version, Encoded);
  return Decoded;
}

unsigned encodeVmcnt(const IsaVersion &Version, unsigned Waitcnt,
                     unsigned Vmcnt) {
  Waitcnt =
      packBits(Vmcnt, Waitcnt, getVmcntBitShiftLo(), getVmcntBitWidthLo());
  if (Version.Major < 9)
    return Waitcnt;

  Vmcnt >>= getVmcntBitWidthLo();
  return packBits(Vmcnt, Waitcnt, getVmcntBitShiftHi(), getVmcntBitWidthHi());
}

unsigned encodeExpcnt(const IsaVersion &Version, unsigned Waitcnt,
                      unsigned Expcnt) {
  return packBits(Expcnt, Waitcnt, getExpcntBitShift(), getExpcntBitWidth());
}

unsigned encodeLgkmcnt(const IsaVersion &Version, unsigned Waitcnt,
                       unsigned Lgkmcnt) {
  return packBits(Lgkmcnt, Waitcnt, getLgkmcntBitShift(),
                  getLgkmcntBitWidth(Version.Major));
}

unsigned encodeWaitcnt(const IsaVersion &Version,
                       unsigned Vmcnt, unsigned Expcnt, unsigned Lgkmcnt) {
  unsigned Waitcnt = getWaitcntBitMask(Version);
  Waitcnt = encodeVmcnt(Version, Waitcnt, Vmcnt);
  Waitcnt = encodeExpcnt(Version, Waitcnt, Expcnt);
  Waitcnt = encodeLgkmcnt(Version, Waitcnt, Lgkmcnt);
  return Waitcnt;
}

unsigned encodeWaitcnt(const IsaVersion &Version, const Waitcnt &Decoded) {
  return encodeWaitcnt(Version, Decoded.VmCnt, Decoded.ExpCnt, Decoded.LgkmCnt);
}
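
// Worked example (illustrative, not from the original file): on gfx9,
// getWaitcntBitMask yields 0xCF7F (vmcnt = 63, expcnt = 7, lgkmcnt = 15, i.e.
// "wait on nothing"), and encodeWaitcnt(Version, 0, 0, 0) packs zeros into
// every field, producing 0 ("wait for everything").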

unsigned getInitialPSInputAddr(const Function &F) {
  return getIntegerAttribute(F, "InitialPSInputAddr", 0);
}

bool isShader(CallingConv::ID cc) {
  switch(cc) {
    case CallingConv::AMDGPU_VS:
    case CallingConv::AMDGPU_LS:
    case CallingConv::AMDGPU_HS:
    case CallingConv::AMDGPU_ES:
    case CallingConv::AMDGPU_GS:
    case CallingConv::AMDGPU_PS:
    case CallingConv::AMDGPU_CS:
      return true;
    default:
      return false;
  }
}

bool isCompute(CallingConv::ID cc) {
  return !isShader(cc) || cc == CallingConv::AMDGPU_CS;
}

bool isEntryFunctionCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
  case CallingConv::AMDGPU_VS:
  case CallingConv::AMDGPU_GS:
  case CallingConv::AMDGPU_PS:
  case CallingConv::AMDGPU_CS:
  case CallingConv::AMDGPU_ES:
  case CallingConv::AMDGPU_HS:
  case CallingConv::AMDGPU_LS:
    return true;
  default:
    return false;
  }
}

bool hasXNACK(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}

bool hasSRAMECC(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSRAMECC];
}

bool hasMIMG_R128(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureMIMG_R128];
}

bool hasPackedD16(const MCSubtargetInfo &STI) {
  return !STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem];
}

bool isSI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSouthernIslands];
}

bool isCI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureSeaIslands];
}

bool isVI(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool isGFX9(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

bool isGFX10(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX10];
}

bool isGCN3Encoding(const MCSubtargetInfo &STI) {
  return STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding];
}

bool isSGPR(unsigned Reg, const MCRegisterInfo* TRI) {
  const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
  const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
  return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
         Reg == AMDGPU::SCC;
}

bool isRegIntersect(unsigned Reg0, unsigned Reg1, const MCRegisterInfo* TRI) {
  for (MCRegAliasIterator R(Reg0, TRI, true); R.isValid(); ++R) {
    if (*R == Reg1) return true;
  }
  return false;
}

#define MAP_REG2REG \
  using namespace AMDGPU; \
  switch(Reg) { \
  default: return Reg; \
  CASE_CI_VI(FLAT_SCR) \
  CASE_CI_VI(FLAT_SCR_LO) \
  CASE_CI_VI(FLAT_SCR_HI) \
  CASE_VI_GFX9_GFX10(TTMP0) \
  CASE_VI_GFX9_GFX10(TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2) \
  CASE_VI_GFX9_GFX10(TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4) \
  CASE_VI_GFX9_GFX10(TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6) \
  CASE_VI_GFX9_GFX10(TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8) \
  CASE_VI_GFX9_GFX10(TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10) \
  CASE_VI_GFX9_GFX10(TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12) \
  CASE_VI_GFX9_GFX10(TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14) \
  CASE_VI_GFX9_GFX10(TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1) \
  CASE_VI_GFX9_GFX10(TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5) \
  CASE_VI_GFX9_GFX10(TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9) \
  CASE_VI_GFX9_GFX10(TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13) \
  CASE_VI_GFX9_GFX10(TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7) \
  CASE_VI_GFX9_GFX10(TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11) \
  CASE_VI_GFX9_GFX10(TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  CASE_VI_GFX9_GFX10(TTMP0_TTMP1_TTMP2_TTMP3_TTMP4_TTMP5_TTMP6_TTMP7_TTMP8_TTMP9_TTMP10_TTMP11_TTMP12_TTMP13_TTMP14_TTMP15) \
  }

#define CASE_CI_VI(node) \
  assert(!isSI(STI)); \
  case node: return isCI(STI) ? node##_ci : node##_vi;

#define CASE_VI_GFX9_GFX10(node) \
  case node: return (isGFX9(STI) || isGFX10(STI)) ? node##_gfx9_gfx10 : node##_vi;

unsigned getMCReg(unsigned Reg, const MCSubtargetInfo &STI) {
  if (STI.getTargetTriple().getArch() == Triple::r600)
    return Reg;
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10

#define CASE_CI_VI(node) case node##_ci: case node##_vi: return node;
#define CASE_VI_GFX9_GFX10(node) case node##_vi: case node##_gfx9_gfx10: return node;

unsigned mc2PseudoReg(unsigned Reg) {
  MAP_REG2REG
}

#undef CASE_CI_VI
#undef CASE_VI_GFX9_GFX10
#undef MAP_REG2REG

bool isSISrcOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_SRC_FIRST &&
         OpType <= AMDGPU::OPERAND_SRC_LAST;
}

bool isSISrcFPOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  switch (OpType) {
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_IMM_V2FP16:
  case AMDGPU::OPERAND_REG_IMM_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    return true;
  default:
    return false;
  }
}

bool isSISrcInlinableOperand(const MCInstrDesc &Desc, unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned OpType = Desc.OpInfo[OpNo].OperandType;
  return OpType >= AMDGPU::OPERAND_REG_INLINE_C_FIRST &&
         OpType <= AMDGPU::OPERAND_REG_INLINE_C_LAST;
}

// Avoid using MCRegisterClass::getSize, since that function will go away
// (move from MC* level to Target* level). Return size in bits.
unsigned getRegBitWidth(unsigned RCID) {
  switch (RCID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::VGPR_32RegClassID:
  case AMDGPU::VRegOrLds_32RegClassID:
  case AMDGPU::VS_32RegClassID:
  case AMDGPU::SReg_32RegClassID:
  case AMDGPU::SReg_32_XM0RegClassID:
  case AMDGPU::SRegOrLds_32RegClassID:
    return 32;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::VS_64RegClassID:
  case AMDGPU::SReg_64RegClassID:
  case AMDGPU::VReg_64RegClassID:
  case AMDGPU::SReg_64_XEXECRegClassID:
    return 64;
  case AMDGPU::SGPR_96RegClassID:
  case AMDGPU::SReg_96RegClassID:
  case AMDGPU::VReg_96RegClassID:
    return 96;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::SReg_128RegClassID:
  case AMDGPU::VReg_128RegClassID:
    return 128;
  case AMDGPU::SGPR_160RegClassID:
  case AMDGPU::SReg_160RegClassID:
  case AMDGPU::VReg_160RegClassID:
    return 160;
  case AMDGPU::SReg_256RegClassID:
  case AMDGPU::VReg_256RegClassID:
    return 256;
  case AMDGPU::SReg_512RegClassID:
  case AMDGPU::VReg_512RegClassID:
    return 512;
  default:
    llvm_unreachable("Unexpected register class");
  }
}

unsigned getRegBitWidth(const MCRegisterClass &RC) {
  return getRegBitWidth(RC.getID());
}

unsigned getRegOperandSize(const MCRegisterInfo *MRI, const MCInstrDesc &Desc,
                           unsigned OpNo) {
  assert(OpNo < Desc.NumOperands);
  unsigned RCID = Desc.OpInfo[OpNo].RegClass;
  return getRegBitWidth(MRI->getRegClass(RCID)) / 8;
}

bool isInlinableLiteral64(int64_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  uint64_t Val = static_cast<uint64_t>(Literal);
  return (Val == DoubleToBits(0.0)) ||
         (Val == DoubleToBits(1.0)) ||
         (Val == DoubleToBits(-1.0)) ||
         (Val == DoubleToBits(0.5)) ||
         (Val == DoubleToBits(-0.5)) ||
         (Val == DoubleToBits(2.0)) ||
         (Val == DoubleToBits(-2.0)) ||
         (Val == DoubleToBits(4.0)) ||
         (Val == DoubleToBits(-4.0)) ||
         (Val == 0x3fc45f306dc9c882 && HasInv2Pi);
}

bool isInlinableLiteral32(int32_t Literal, bool HasInv2Pi) {
  if (Literal >= -16 && Literal <= 64)
    return true;

  // The actual type of the operand does not seem to matter as long
  // as the bits match one of the inline immediate values. For example:
  //
  // -nan has the hexadecimal encoding of 0xfffffffe which is -2 in decimal,
  // so it is a legal inline immediate.
  //
  // 1065353216 has the hexadecimal encoding 0x3f800000 which is 1.0f in
  // floating-point, so it is a legal inline immediate.

  uint32_t Val = static_cast<uint32_t>(Literal);
  return (Val == FloatToBits(0.0f)) ||
         (Val == FloatToBits(1.0f)) ||
         (Val == FloatToBits(-1.0f)) ||
         (Val == FloatToBits(0.5f)) ||
         (Val == FloatToBits(-0.5f)) ||
         (Val == FloatToBits(2.0f)) ||
         (Val == FloatToBits(-2.0f)) ||
         (Val == FloatToBits(4.0f)) ||
         (Val == FloatToBits(-4.0f)) ||
         (Val == 0x3e22f983 && HasInv2Pi);
}

bool isInlinableLiteral16(int16_t Literal, bool HasInv2Pi) {
  if (!HasInv2Pi)
    return false;

  if (Literal >= -16 && Literal <= 64)
    return true;

  uint16_t Val = static_cast<uint16_t>(Literal);
  return Val == 0x3C00 || // 1.0
         Val == 0xBC00 || // -1.0
         Val == 0x3800 || // 0.5
         Val == 0xB800 || // -0.5
         Val == 0x4000 || // 2.0
         Val == 0xC000 || // -2.0
         Val == 0x4400 || // 4.0
         Val == 0xC400 || // -4.0
         Val == 0x3118;   // 1/2pi
}

bool isInlinableLiteralV216(int32_t Literal, bool HasInv2Pi) {
  assert(HasInv2Pi);

  if (isInt<16>(Literal) || isUInt<16>(Literal)) {
    int16_t Trunc = static_cast<int16_t>(Literal);
    return AMDGPU::isInlinableLiteral16(Trunc, HasInv2Pi);
  }
  if (!(Literal & 0xffff))
    return AMDGPU::isInlinableLiteral16(Literal >> 16, HasInv2Pi);

  int16_t Lo16 = static_cast<int16_t>(Literal);
  int16_t Hi16 = static_cast<int16_t>(Literal >> 16);
  return Lo16 == Hi16 && isInlinableLiteral16(Lo16, HasInv2Pi);
}
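
// Example (illustrative, not from the original file): the packed v2f16 literal
// 0x40004000 encodes <2.0, 2.0>; both halves are 0x4000, which
// isInlinableLiteral16 accepts as 2.0, so the whole pair is inlinable.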
967
Matt Arsenault894e53d2017-07-26 20:39:42 +0000968bool isArgPassedInSGPR(const Argument *A) {
969 const Function *F = A->getParent();
970
971 // Arguments to compute shaders are never a source of divergence.
972 CallingConv::ID CC = F->getCallingConv();
973 switch (CC) {
974 case CallingConv::AMDGPU_KERNEL:
975 case CallingConv::SPIR_KERNEL:
976 return true;
977 case CallingConv::AMDGPU_VS:
Tim Renoufef1ae8f2017-09-29 09:51:22 +0000978 case CallingConv::AMDGPU_LS:
Matt Arsenault894e53d2017-07-26 20:39:42 +0000979 case CallingConv::AMDGPU_HS:
Tim Renoufef1ae8f2017-09-29 09:51:22 +0000980 case CallingConv::AMDGPU_ES:
Matt Arsenault894e53d2017-07-26 20:39:42 +0000981 case CallingConv::AMDGPU_GS:
982 case CallingConv::AMDGPU_PS:
983 case CallingConv::AMDGPU_CS:
984 // For non-compute shaders, SGPR inputs are marked with either inreg or byval.
985 // Everything else is in VGPRs.
986 return F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::InReg) ||
987 F->getAttributes().hasParamAttribute(A->getArgNo(), Attribute::ByVal);
988 default:
989 // TODO: Should calls support inreg for SGPR inputs?
990 return false;
991 }
992}

static bool hasSMEMByteOffset(const MCSubtargetInfo &ST) {
  return isGCN3Encoding(ST) || isGFX10(ST);
}

int64_t getSMRDEncodedOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  if (hasSMEMByteOffset(ST))
    return ByteOffset;
  return ByteOffset >> 2;
}

bool isLegalSMRDImmOffset(const MCSubtargetInfo &ST, int64_t ByteOffset) {
  int64_t EncodedOffset = getSMRDEncodedOffset(ST, ByteOffset);
  return (hasSMEMByteOffset(ST)) ?
    isUInt<20>(EncodedOffset) : isUInt<8>(EncodedOffset);
}
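
// Example (illustrative, not from the original file): a 400-byte offset on SI
// encodes as 400 >> 2 = 100 dwords, legal because it fits in 8 bits; on a
// GCN3-encoding or gfx10 target the byte offset 400 is kept as-is and checked
// against the 20-bit limit.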

// Given Imm, split it into the values to put into the SOffset and ImmOffset
// fields in an MUBUF instruction. Return false if it is not possible (due to a
// hardware bug needing a workaround).
//
// The required alignment ensures that individual address components remain
// aligned if they are aligned to begin with. It also ensures that additional
// offsets within the given alignment can be added to the resulting ImmOffset.
bool splitMUBUFOffset(uint32_t Imm, uint32_t &SOffset, uint32_t &ImmOffset,
                      const GCNSubtarget *Subtarget, uint32_t Align) {
  const uint32_t MaxImm = alignDown(4095, Align);
  uint32_t Overflow = 0;

  if (Imm > MaxImm) {
    if (Imm <= MaxImm + 64) {
      // Use an SOffset inline constant for 4..64
      Overflow = Imm - MaxImm;
      Imm = MaxImm;
    } else {
      // Try to keep the same value in SOffset for adjacent loads, so that
      // the corresponding register contents can be re-used.
      //
      // Load values with all low-bits (except for alignment bits) set into
      // SOffset, so that a larger range of values can be covered using
      // s_movk_i32.
      //
      // Atomic operations fail to work correctly when individual address
      // components are unaligned, even if their sum is aligned.
      uint32_t High = (Imm + Align) & ~4095;
      uint32_t Low = (Imm + Align) & 4095;
      Imm = Low;
      Overflow = High - Align;
    }
  }

  // There is a hardware bug in SI and CI which prevents address clamping in
  // MUBUF instructions from working correctly with SOffsets. The immediate
  // offset is unaffected.
  if (Overflow > 0 &&
      Subtarget->getGeneration() <= AMDGPUSubtarget::SEA_ISLANDS)
    return false;

  ImmOffset = Imm;
  SOffset = Overflow;
  return true;
}
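
// Worked example (illustrative, not from the original file): with Imm = 5000
// and Align = 4, MaxImm is 4092 and 5000 exceeds MaxImm + 64, so the else
// branch runs: High = 5004 & ~4095 = 4096 and Low = 5004 & 4095 = 908, giving
// SOffset = 4092 and ImmOffset = 908 (4092 + 908 == 5000).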

SIModeRegisterDefaults::SIModeRegisterDefaults(const Function &F) {
  *this = getDefaultForCallingConv(F.getCallingConv());

  StringRef IEEEAttr = F.getFnAttribute("amdgpu-ieee").getValueAsString();
  if (!IEEEAttr.empty())
    IEEE = IEEEAttr == "true";

  StringRef DX10ClampAttr
    = F.getFnAttribute("amdgpu-dx10-clamp").getValueAsString();
  if (!DX10ClampAttr.empty())
    DX10Clamp = DX10ClampAttr == "true";
}

namespace {

struct SourceOfDivergence {
  unsigned Intr;
};
const SourceOfDivergence *lookupSourceOfDivergence(unsigned Intr);

#define GET_SourcesOfDivergence_IMPL
#include "AMDGPUGenSearchableTables.inc"

} // end anonymous namespace

bool isIntrinsicSourceOfDivergence(unsigned IntrID) {
  return lookupSourceOfDivergence(IntrID);
}

} // namespace AMDGPU
} // namespace llvm