//===-- AMDGPUAsmPrinter.cpp - AMDGPU Assembly printer -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The AMDGPUAsmPrinter is used to print both the textual assembly string and
/// the binary code. When passed an MCAsmStreamer it prints assembly, and when
/// passed an MCObjectStreamer it outputs binary code.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "AMDGPURuntimeMetadata.h"

using namespace ::AMDGPU;
using namespace llvm;

// TODO: This should get the default rounding mode from the kernel. We just set
// the default here, but this could change if the OpenCL rounding mode pragmas
// are used.
//
// The denormal mode here should match what is reported by the OpenCL runtime
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
//
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
// precision, and leaves single precision to flush all and does not report
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
// CL_FP_DENORM for both.
//
// FIXME: It seems some instructions do not support single precision denormals
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, sqrt_f32, and
// sin_f32, cos_f32 on most parts).

// We want to use these instructions, and using fp32 denormals also causes
// instructions to run at the double precision rate for the device, so it's
// probably best to just report no single precision denormals.
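//
// The value computed here ends up in the FLOAT_MODE field of
// COMPUTE_PGM_RSRC1 (see getSIProgramInfo below): the low bits hold the SP/DP
// rounding modes and the bits above them hold the SP/DP denormal modes.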
static uint32_t getFPMode(const MachineFunction &F) {
  const SISubtarget &ST = F.getSubtarget<SISubtarget>();
  // TODO: Is there any real use for the flush in only / flush out only modes?

  uint32_t FP32Denormals =
    ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  uint32_t FP64Denormals =
    ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_DENORM_MODE_SP(FP32Denormals) |
         FP_DENORM_MODE_DP(FP64Denormals);
}

static AsmPrinter *
createAMDGPUAsmPrinterPass(TargetMachine &tm,
                           std::unique_ptr<MCStreamer> &&Streamer) {
  return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}

extern "C" void LLVMInitializeAMDGPUAsmPrinter() {
  TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
  TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
}

AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
                                   std::unique_ptr<MCStreamer> Streamer)
  : AsmPrinter(TM, std::move(Streamer)) {}

const char *AMDGPUAsmPrinter::getPassName() const {
  return "AMDGPU Assembly Printer";
}

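// For the amdhsa OS, emit the HSA code object version and target ISA
// directives at the start of the module, before any functions are printed.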
void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) {
  if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
    return;

  // Need to construct an MCSubtargetInfo here in case we have no functions
  // in the module.
  std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
        TM.getTargetTriple().str(), TM.getTargetCPU(),
        TM.getTargetFeatureString()));

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());

  TS->EmitDirectiveHSACodeObjectVersion(2, 1);

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI->getFeatureBits());
  TS->EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor, ISA.Stepping,
                                    "AMD", "AMDGPU");
  emitStartOfRuntimeMetadata(M);
}

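// For HSA kernels, compute the program resource info and emit the
// amd_kernel_code_t header immediately ahead of the function body.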
void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  SIProgramInfo KernelInfo;
  if (STM.isAmdHsaOS()) {
    getSIProgramInfo(KernelInfo, *MF);
    EmitAmdKernelCodeT(*MF, KernelInfo);
  }
}

void AMDGPUAsmPrinter::EmitFunctionEntryLabel() {
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  if (MFI->isKernel() && STM.isAmdHsaOS()) {
    AMDGPUTargetStreamer *TS =
        static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
    TS->EmitAMDGPUSymbolType(CurrentFnSym->getName(),
                             ELF::STT_AMDGPU_HSA_KERNEL);
  }

  AsmPrinter::EmitFunctionEntryLabel();
}

void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {

  // Group segment variables aren't emitted in HSA.
  if (AMDGPU::isGroupSegment(GV))
    return;

  AsmPrinter::EmitGlobalVariable(GV);
}

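// Emit a single machine function: write its resource configuration into the
// .AMDGPU.config section (or the kernel code header on HSA), print the body,
// and optionally append verbose resource comments, a disassembly dump, and
// runtime metadata.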
bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {

  // The starting address of all shader programs must be 256-byte aligned.
  MF.setAlignment(8);

  SetupMachineFunction(MF);

  MCContext &Context = getObjFileLowering().getContext();
  MCSectionELF *ConfigSection =
      Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
  OutStreamer->SwitchSection(ConfigSection);

  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  SIProgramInfo KernelInfo;
  if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    getSIProgramInfo(KernelInfo, MF);
    if (!STM.isAmdHsaOS()) {
      EmitProgramInfoSI(MF, KernelInfo);
    }
  } else {
    EmitProgramInfoR600(MF);
  }

  DisasmLines.clear();
  HexLines.clear();
  DisasmLineMaxLen = 0;

  EmitFunctionBody();

  if (isVerbose()) {
    MCSectionELF *CommentSection =
        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(CommentSection);

    if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      OutStreamer->emitRawComment(" Kernel info:", false);
      OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen),
                                  false);
      OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR),
                                  false);
      OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR),
                                  false);
      OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode),
                                  false);
      OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode),
                                  false);
      OutStreamer->emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize),
                                  false);
      OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) +
                                  " bytes/workgroup (compile time only)", false);

      OutStreamer->emitRawComment(" ReservedVGPRFirst: " + Twine(KernelInfo.ReservedVGPRFirst),
                                  false);
      OutStreamer->emitRawComment(" ReservedVGPRCount: " + Twine(KernelInfo.ReservedVGPRCount),
                                  false);

      if (MF.getSubtarget<SISubtarget>().debuggerEmitPrologue()) {
        OutStreamer->emitRawComment(" DebuggerWavefrontPrivateSegmentOffsetSGPR: s" +
                                    Twine(KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR), false);
        OutStreamer->emitRawComment(" DebuggerPrivateSegmentBufferSGPR: s" +
                                    Twine(KernelInfo.DebuggerPrivateSegmentBufferSGPR), false);
      }

      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:USER_SGPR: " +
                                  Twine(G_00B84C_USER_SGPR(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_X_EN: " +
                                  Twine(G_00B84C_TGID_X_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Y_EN: " +
                                  Twine(G_00B84C_TGID_Y_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Z_EN: " +
                                  Twine(G_00B84C_TGID_Z_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " +
                                  Twine(G_00B84C_TIDIG_COMP_CNT(KernelInfo.ComputePGMRSrc2)),
                                  false);
    } else {
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      OutStreamer->emitRawComment(
          Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->CFStackSize)));
    }
  }

  if (STM.dumpCode()) {

    OutStreamer->SwitchSection(
        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0));

    for (size_t i = 0; i < DisasmLines.size(); ++i) {
      std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
      Comment += " ; " + HexLines[i] + "\n";

      OutStreamer->EmitBytes(StringRef(DisasmLines[i]));
      OutStreamer->EmitBytes(StringRef(Comment));
    }
  }

  emitRuntimeMetadata(*MF.getFunction());

  return false;
}

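// Emit the R600/Evergreen program resource configuration: scan the machine
// code for the highest GPR used and for KILLGT, then write the SQ_PGM_RESOURCES
// register values for the function's calling convention.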
void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
  unsigned MaxGPR = 0;
  bool killPixel = false;
  const R600Subtarget &STM = MF.getSubtarget<R600Subtarget>();
  const R600RegisterInfo *RI = STM.getRegisterInfo();
  const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (MI.getOpcode() == AMDGPU::KILLGT)
        killPixel = true;
      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        if (!MO.isReg())
          continue;
        unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff;

        // Registers with an encoding value > 127 aren't GPRs.
        if (HWReg > 127)
          continue;
        MaxGPR = std::max(MaxGPR, HWReg);
      }
    }
  }

  unsigned RsrcReg;
  if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
    // Evergreen / Northern Islands
    switch (MF.getFunction()->getCallingConv()) {
    default: // Fall through
    case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
    case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
    case CallingConv::AMDGPU_VS: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
    }
  } else {
    // R600 / R700
    switch (MF.getFunction()->getCallingConv()) {
    default: // Fall through
    case CallingConv::AMDGPU_GS: // Fall through
    case CallingConv::AMDGPU_CS: // Fall through
    case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
    }
  }

  OutStreamer->EmitIntValue(RsrcReg, 4);
  OutStreamer->EmitIntValue(S_NUM_GPRS(MaxGPR + 1) |
                            S_STACK_SIZE(MFI->CFStackSize), 4);
  OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
  OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);

  if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
    OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
    OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4);
  }
}

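// Compute the SI/GCN program resource information for a kernel: scan every
// instruction to find the highest SGPR and VGPR used, account for implicitly
// reserved registers, and derive the packed COMPUTE_PGM_RSRC1/RSRC2 values
// along with LDS and scratch block counts.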
void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                        const MachineFunction &MF) const {
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  uint64_t CodeSize = 0;
  unsigned MaxSGPR = 0;
  unsigned MaxVGPR = 0;
  bool VCCUsed = false;
  bool FlatUsed = false;
  const SIRegisterInfo *RI = STM.getRegisterInfo();
  const SIInstrInfo *TII = STM.getInstrInfo();

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      // TODO: Should we count size of debug info?
      if (MI.isDebugValue())
        continue;

      CodeSize += TII->getInstSizeInBytes(MI);

      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        unsigned width = 0;
        bool isSGPR = false;

        if (!MO.isReg())
          continue;

        unsigned reg = MO.getReg();
        switch (reg) {
        case AMDGPU::EXEC:
        case AMDGPU::EXEC_LO:
        case AMDGPU::EXEC_HI:
        case AMDGPU::SCC:
        case AMDGPU::M0:
          continue;

        case AMDGPU::VCC:
        case AMDGPU::VCC_LO:
        case AMDGPU::VCC_HI:
          VCCUsed = true;
          continue;

        case AMDGPU::FLAT_SCR:
        case AMDGPU::FLAT_SCR_LO:
        case AMDGPU::FLAT_SCR_HI:
          FlatUsed = true;
          continue;

        case AMDGPU::TBA:
        case AMDGPU::TBA_LO:
        case AMDGPU::TBA_HI:
        case AMDGPU::TMA:
        case AMDGPU::TMA_LO:
        case AMDGPU::TMA_HI:
          llvm_unreachable("trap handler registers should not be used");

        default:
          break;
        }

        if (AMDGPU::SReg_32RegClass.contains(reg)) {
          assert(!AMDGPU::TTMP_32RegClass.contains(reg) &&
                 "trap handler registers should not be used");
          isSGPR = true;
          width = 1;
        } else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
          isSGPR = false;
          width = 1;
        } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
          assert(!AMDGPU::TTMP_64RegClass.contains(reg) &&
                 "trap handler registers should not be used");
          isSGPR = true;
          width = 2;
        } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
          isSGPR = false;
          width = 2;
        } else if (AMDGPU::VReg_96RegClass.contains(reg)) {
          isSGPR = false;
          width = 3;
        } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
          isSGPR = true;
          width = 4;
        } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
          isSGPR = false;
          width = 4;
        } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
          isSGPR = true;
          width = 8;
        } else if (AMDGPU::VReg_256RegClass.contains(reg)) {
          isSGPR = false;
          width = 8;
        } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
          isSGPR = true;
          width = 16;
        } else if (AMDGPU::VReg_512RegClass.contains(reg)) {
          isSGPR = false;
          width = 16;
        } else {
          llvm_unreachable("Unknown register class");
        }
        unsigned hwReg = RI->getEncodingValue(reg) & 0xff;
        unsigned maxUsed = hwReg + width - 1;
        if (isSGPR) {
          MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR;
        } else {
          MaxVGPR = maxUsed > MaxVGPR ? maxUsed : MaxVGPR;
        }
      }
    }
  }

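  // VCC, FLAT_SCRATCH, and (on VI+) the XNACK mask are allocated at the top of
  // the SGPR file, so they are accounted for here as extra SGPRs rather than
  // being tracked through the operand scan above.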
  unsigned ExtraSGPRs = 0;

  if (VCCUsed)
    ExtraSGPRs = 2;

  if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) {
    if (FlatUsed)
      ExtraSGPRs = 4;
  } else {
    if (STM.isXNACKEnabled())
      ExtraSGPRs = 4;

    if (FlatUsed)
      ExtraSGPRs = 6;
  }

  MaxSGPR += ExtraSGPRs;

  // Record first reserved register and reserved register count fields, and
  // update max register counts if "amdgpu-debugger-reserve-regs" attribute was
  // specified.
  if (STM.debuggerReserveRegs()) {
    ProgInfo.ReservedVGPRFirst = MaxVGPR + 1;
    ProgInfo.ReservedVGPRCount = MFI->getDebuggerReservedVGPRCount();
    MaxVGPR += MFI->getDebuggerReservedVGPRCount();
  }

  // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and
  // DebuggerPrivateSegmentBufferSGPR fields if "amdgpu-debugger-emit-prologue"
  // attribute was specified.
  if (STM.debuggerEmitPrologue()) {
    ProgInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR =
        RI->getHWRegIndex(MFI->getScratchWaveOffsetReg());
    ProgInfo.DebuggerPrivateSegmentBufferSGPR =
        RI->getHWRegIndex(MFI->getScratchRSrcReg());
  }

  // We found the maximum register index. They start at 0, so add one to get the
  // number of registers.
  ProgInfo.NumVGPR = MaxVGPR + 1;
  ProgInfo.NumSGPR = MaxSGPR + 1;

  if (STM.hasSGPRInitBug()) {
    if (ProgInfo.NumSGPR > SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
      LLVMContext &Ctx = MF.getFunction()->getContext();
      DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
                                       "SGPRs with SGPR init bug",
                                       ProgInfo.NumSGPR, DS_Error);
      Ctx.diagnose(Diag);
    }

    ProgInfo.NumSGPR = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
  }

  if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction()->getContext();
    DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
                                     MFI->NumUserSGPRs, DS_Error);
    Ctx.diagnose(Diag);
  }

  if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
    LLVMContext &Ctx = MF.getFunction()->getContext();
    DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory",
                                     MFI->getLDSSize(), DS_Error);
    Ctx.diagnose(Diag);
  }

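  // The register counts are reported to the hardware in granules: VGPRs in
  // groups of 4 and SGPRs in groups of 8, encoded as (granules - 1). For
  // example, 23 VGPRs gives VGPRBlocks = 5 (24 VGPRs reserved) and 30 SGPRs
  // gives SGPRBlocks = 3 (32 SGPRs reserved).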
  ProgInfo.VGPRBlocks = (ProgInfo.NumVGPR - 1) / 4;
  ProgInfo.SGPRBlocks = (ProgInfo.NumSGPR - 1) / 8;
  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(MF);

  ProgInfo.IEEEMode = 0;

  // Make the clamp modifier on a NaN input return 0.
  ProgInfo.DX10Clamp = 1;

  const MachineFrameInfo &FrameInfo = MF.getFrameInfo();
  ProgInfo.ScratchSize = FrameInfo.getStackSize();

  ProgInfo.FlatUsed = FlatUsed;
  ProgInfo.VCCUsed = VCCUsed;
  ProgInfo.CodeLen = CodeSize;

  unsigned LDSAlignShift;
  if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
    LDSAlignShift = 9;
  }

  unsigned LDSSpillSize = MFI->LDSWaveSpillSize *
                          MFI->getMaximumWorkGroupSize(MF);

  ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize;
  ProgInfo.LDSBlocks =
      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;
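  // For example, with LDSAlignShift = 9 (CI and newer), an LDSSize of 1000
  // bytes is rounded up to 1024 and reported as 2 blocks of 512 bytes.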

  // Scratch is allocated in 256 dword blocks.
  unsigned ScratchAlignShift = 10;
  // We need to program the hardware with the amount of scratch memory that
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
      alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
              1ULL << ScratchAlignShift) >>
      ScratchAlignShift;
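  // For example, 20 bytes of scratch per thread with a 64-wide wavefront is
  // 1280 bytes per wave, which rounds up to 2 blocks of 1024 bytes.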

  ProgInfo.ComputePGMRSrc1 =
      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
      S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
      S_00B848_PRIORITY(ProgInfo.Priority) |
      S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
      S_00B848_PRIV(ProgInfo.Priv) |
      S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
      S_00B848_DEBUG_MODE(ProgInfo.DebugMode) |
      S_00B848_IEEE_MODE(ProgInfo.IEEEMode);

  // 0 = X, 1 = XY, 2 = XYZ
  unsigned TIDIGCompCnt = 0;
  if (MFI->hasWorkItemIDZ())
    TIDIGCompCnt = 2;
  else if (MFI->hasWorkItemIDY())
    TIDIGCompCnt = 1;

  ProgInfo.ComputePGMRSrc2 =
      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
      S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) |
      S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) |
      S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) |
      S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) |
      S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
      S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
      S_00B84C_EXCP_EN_MSB(0) |
      S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) |
      S_00B84C_EXCP_EN(0);
}

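// Map a calling convention to the register that receives the shader's first
// program resource word (compute uses COMPUTE_PGM_RSRC1, graphics stages use
// their SPI_SHADER_PGM_RSRC1_* counterparts).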
static unsigned getRsrcReg(CallingConv::ID CallConv) {
  switch (CallConv) {
  default: // Fall through
  case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
  case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
  case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
  case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
  }
}

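// Emit the SI program info into the .AMDGPU.config section as register/value
// pairs that a non-HSA driver (e.g. Mesa) programs before launching the
// shader.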
void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                         const SIProgramInfo &KernelInfo) {
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());

  if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
    OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);

    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc1, 4);

    OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc2, 4);

    OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
    OutStreamer->EmitIntValue(S_00B860_WAVESIZE(KernelInfo.ScratchBlocks), 4);

    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
    // 0" comment but I don't see a corresponding field in the register spec.
  } else {
    OutStreamer->EmitIntValue(RsrcReg, 4);
    OutStreamer->EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) |
                              S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4);
    if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
      OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
      OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4);
    }
  }

  if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
    OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
    OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4);
    OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
    OutStreamer->EmitIntValue(MFI->PSInputEna, 4);
    OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
    OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4);
  }

  OutStreamer->EmitIntValue(R_SPILLED_SGPRS, 4);
  OutStreamer->EmitIntValue(MFI->getNumSpilledSGPRs(), 4);
  OutStreamer->EmitIntValue(R_SPILLED_VGPRS, 4);
  OutStreamer->EmitIntValue(MFI->getNumSpilledVGPRs(), 4);
}

// This is supposed to be log2(Size)
static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
  switch (Size) {
  case 4:
    return AMD_ELEMENT_4_BYTES;
  case 8:
    return AMD_ELEMENT_8_BYTES;
  case 16:
    return AMD_ELEMENT_16_BYTES;
  default:
    llvm_unreachable("invalid private_element_size");
  }
}

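// Build and emit the amd_kernel_code_t header that the HSA runtime reads to
// launch the kernel: the packed resource registers, code properties for the
// enabled user SGPRs, and the segment sizes.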
void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
                                          const SIProgramInfo &KernelInfo) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  amd_kernel_code_t header;

  AMDGPU::initDefaultAMDKernelCodeT(header, STM.getFeatureBits());

  header.compute_pgm_resource_registers =
      KernelInfo.ComputePGMRSrc1 |
      (KernelInfo.ComputePGMRSrc2 << 32);
  header.code_properties = AMD_CODE_PROPERTY_IS_PTR64;

  AMD_HSA_BITS_SET(header.code_properties,
                   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
                   getElementByteSizeValue(STM.getMaxPrivateElementSize()));

  if (MFI->hasPrivateSegmentBuffer()) {
    header.code_properties |=
        AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }

  if (MFI->hasDispatchPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (MFI->hasQueuePtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;

  if (MFI->hasKernargSegmentPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;

  if (MFI->hasDispatchID())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;

  if (MFI->hasFlatScratchInit())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;

  // TODO: Private segment size

  if (MFI->hasGridWorkgroupCountX()) {
    header.code_properties |=
        AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X;
  }

  if (MFI->hasGridWorkgroupCountY()) {
    header.code_properties |=
        AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y;
  }

  if (MFI->hasGridWorkgroupCountZ()) {
    header.code_properties |=
        AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z;
  }

  if (MFI->hasDispatchPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (STM.debuggerSupported())
    header.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED;

  if (STM.isXNACKEnabled())
    header.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;

  // FIXME: Should use getKernArgSize
  header.kernarg_segment_byte_size = MFI->getABIArgOffset();
  header.wavefront_sgpr_count = KernelInfo.NumSGPR;
  header.workitem_vgpr_count = KernelInfo.NumVGPR;
  header.workitem_private_segment_byte_size = KernelInfo.ScratchSize;
  header.workgroup_group_segment_byte_size = KernelInfo.LDSSize;
  header.reserved_vgpr_first = KernelInfo.ReservedVGPRFirst;
  header.reserved_vgpr_count = KernelInfo.ReservedVGPRCount;

  if (STM.debuggerEmitPrologue()) {
    header.debug_wavefront_private_segment_offset_sgpr =
        KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR;
    header.debug_private_segment_buffer_sgpr =
        KernelInfo.DebuggerPrivateSegmentBufferSGPR;
  }

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());

  OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
  TS->EmitAMDKernelCodeT(header);
}

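// Print a register operand referenced by inline assembly; only the plain and
// 'r' modifiers are handled here, everything else is delegated to the generic
// AsmPrinter.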
bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                       unsigned AsmVariant,
                                       const char *ExtraCode, raw_ostream &O) {
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      // See if this is a generic print operand
      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
    case 'r':
      break;
    }
  }

  AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), O,
      *TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo());
  return false;
}

// Emit a key and an integer value for runtime metadata.
static void emitRuntimeMDIntValue(MCStreamer &Streamer,
                                  RuntimeMD::Key K, uint64_t V,
                                  unsigned Size) {
  Streamer.EmitIntValue(K, 1);
  Streamer.EmitIntValue(V, Size);
}

// Emit a key and a string value for runtime metadata.
static void emitRuntimeMDStringValue(MCStreamer &Streamer,
                                     RuntimeMD::Key K, StringRef S) {
  Streamer.EmitIntValue(K, 1);
  Streamer.EmitIntValue(S.size(), 4);
  Streamer.EmitBytes(S);
}

// Emit a key and three integer values for runtime metadata.
// The three integer values are obtained from MDNode \p Node.
static void emitRuntimeMDThreeIntValues(MCStreamer &Streamer,
                                        RuntimeMD::Key K, MDNode *Node,
                                        unsigned Size) {
  assert(Node->getNumOperands() == 3);

  Streamer.EmitIntValue(K, 1);
  for (const MDOperand &Op : Node->operands()) {
    const ConstantInt *CI = mdconst::extract<ConstantInt>(Op);
    Streamer.EmitIntValue(CI->getZExtValue(), Size);
  }
}

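// Open the runtime metadata section and emit module-level metadata: the
// metadata format version and, when present, the OpenCL C language version.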
void AMDGPUAsmPrinter::emitStartOfRuntimeMetadata(const Module &M) {
  OutStreamer->SwitchSection(getObjFileLowering().getContext()
      .getELFSection(RuntimeMD::SectionName, ELF::SHT_PROGBITS, 0));

  emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyMDVersion,
                        RuntimeMD::MDVersion << 8 | RuntimeMD::MDRevision, 2);
  if (auto MD = M.getNamedMetadata("opencl.ocl.version")) {
    if (MD->getNumOperands() != 0) {
      auto Node = MD->getOperand(0);
      if (Node->getNumOperands() > 1) {
        emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyLanguage,
                              RuntimeMD::OpenCL_C, 1);
        uint16_t Major = mdconst::extract<ConstantInt>(Node->getOperand(0))
                           ->getZExtValue();
        uint16_t Minor = mdconst::extract<ConstantInt>(Node->getOperand(1))
                           ->getZExtValue();
        emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyLanguageVersion,
                              Major * 100 + Minor * 10, 2);
      }
    }
  }
}

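// Map an LLVM type to the OpenCL C type name used in the vec_type_hint
// runtime metadata.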
static std::string getOCLTypeName(Type *Ty, bool Signed) {
  switch (Ty->getTypeID()) {
  case Type::HalfTyID:
    return "half";
  case Type::FloatTyID:
    return "float";
  case Type::DoubleTyID:
    return "double";
  case Type::IntegerTyID: {
    if (!Signed)
      return (Twine('u') + getOCLTypeName(Ty, true)).str();
    unsigned BW = Ty->getIntegerBitWidth();
    switch (BW) {
    case 8:
      return "char";
    case 16:
      return "short";
    case 32:
      return "int";
    case 64:
      return "long";
    default:
      return (Twine('i') + Twine(BW)).str();
    }
  }
  case Type::VectorTyID: {
    VectorType *VecTy = cast<VectorType>(Ty);
    Type *EleTy = VecTy->getElementType();
    unsigned Size = VecTy->getVectorNumElements();
    return (Twine(getOCLTypeName(EleTy, Signed)) + Twine(Size)).str();
  }
  default:
    llvm_unreachable("invalid type");
  }
}

static RuntimeMD::KernelArg::ValueType getRuntimeMDValueType(
    Type *Ty, StringRef TypeName) {
  switch (Ty->getTypeID()) {
  case Type::HalfTyID:
    return RuntimeMD::KernelArg::F16;
  case Type::FloatTyID:
    return RuntimeMD::KernelArg::F32;
  case Type::DoubleTyID:
    return RuntimeMD::KernelArg::F64;
  case Type::IntegerTyID: {
    bool Signed = !TypeName.startswith("u");
    switch (Ty->getIntegerBitWidth()) {
    case 8:
      return Signed ? RuntimeMD::KernelArg::I8 : RuntimeMD::KernelArg::U8;
    case 16:
      return Signed ? RuntimeMD::KernelArg::I16 : RuntimeMD::KernelArg::U16;
    case 32:
      return Signed ? RuntimeMD::KernelArg::I32 : RuntimeMD::KernelArg::U32;
    case 64:
      return Signed ? RuntimeMD::KernelArg::I64 : RuntimeMD::KernelArg::U64;
    default:
      // Runtime does not recognize other integer types. Report as struct type.
      return RuntimeMD::KernelArg::Struct;
    }
  }
  case Type::VectorTyID:
    return getRuntimeMDValueType(Ty->getVectorElementType(), TypeName);
  case Type::PointerTyID:
    return getRuntimeMDValueType(Ty->getPointerElementType(), TypeName);
  default:
    return RuntimeMD::KernelArg::Struct;
  }
}

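// Emit the per-kernel runtime metadata: for each kernel argument, its size,
// alignment, type name, qualifiers, kind, and address space, followed by the
// kernel-level work-group size hints.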
void AMDGPUAsmPrinter::emitRuntimeMetadata(const Function &F) {
  if (!F.getMetadata("kernel_arg_type"))
    return;

  MCContext &Context = getObjFileLowering().getContext();
  OutStreamer->SwitchSection(
      Context.getELFSection(RuntimeMD::SectionName, ELF::SHT_PROGBITS, 0));
  OutStreamer->EmitIntValue(RuntimeMD::KeyKernelBegin, 1);
  emitRuntimeMDStringValue(*OutStreamer, RuntimeMD::KeyKernelName, F.getName());

  for (auto &Arg : F.args()) {
    // Emit KeyArgBegin.
    unsigned I = Arg.getArgNo();
    OutStreamer->EmitIntValue(RuntimeMD::KeyArgBegin, 1);

    // Emit KeyArgSize and KeyArgAlign.
    Type *T = Arg.getType();
    const DataLayout &DL = F.getParent()->getDataLayout();
    emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyArgSize,
                          DL.getTypeAllocSize(T), 4);
    emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyArgAlign,
                          DL.getABITypeAlignment(T), 4);

    // Emit KeyArgTypeName.
    auto TypeName = dyn_cast<MDString>(F.getMetadata(
        "kernel_arg_type")->getOperand(I))->getString();
    emitRuntimeMDStringValue(*OutStreamer, RuntimeMD::KeyArgTypeName, TypeName);

    // Emit KeyArgName.
    if (auto ArgNameMD = F.getMetadata("kernel_arg_name")) {
      auto ArgName = cast<MDString>(ArgNameMD->getOperand(I))->getString();
      emitRuntimeMDStringValue(*OutStreamer, RuntimeMD::KeyArgName, ArgName);
    }

    // Emit KeyArgIsVolatile, KeyArgIsRestrict, KeyArgIsConst and KeyArgIsPipe.
    auto TypeQual = cast<MDString>(F.getMetadata(
        "kernel_arg_type_qual")->getOperand(I))->getString();
    SmallVector<StringRef, 1> SplitQ;
    TypeQual.split(SplitQ, " ", -1, false /* Drop empty entry */);

    for (StringRef KeyName : SplitQ) {
      auto Key = StringSwitch<RuntimeMD::Key>(KeyName)
          .Case("volatile", RuntimeMD::KeyArgIsVolatile)
          .Case("restrict", RuntimeMD::KeyArgIsRestrict)
          .Case("const", RuntimeMD::KeyArgIsConst)
          .Case("pipe", RuntimeMD::KeyArgIsPipe)
          .Default(RuntimeMD::KeyNull);
      OutStreamer->EmitIntValue(Key, 1);
    }

    // Emit KeyArgTypeKind.
    auto BaseTypeName = cast<MDString>(
        F.getMetadata("kernel_arg_base_type")->getOperand(I))->getString();
    auto TypeKind = StringSwitch<RuntimeMD::KernelArg::TypeKind>(BaseTypeName)
        .Case("sampler_t", RuntimeMD::KernelArg::Sampler)
        .Case("queue_t", RuntimeMD::KernelArg::Queue)
        .Cases("image1d_t", "image1d_array_t", "image1d_buffer_t",
               "image2d_t", "image2d_array_t", RuntimeMD::KernelArg::Image)
        .Cases("image2d_depth_t", "image2d_array_depth_t",
               "image2d_msaa_t", "image2d_array_msaa_t",
               "image2d_msaa_depth_t", RuntimeMD::KernelArg::Image)
        .Cases("image2d_array_msaa_depth_t", "image3d_t",
               RuntimeMD::KernelArg::Image)
        .Default(isa<PointerType>(T) ? RuntimeMD::KernelArg::Pointer :
                 RuntimeMD::KernelArg::Value);
    emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyArgTypeKind, TypeKind, 1);

    // Emit KeyArgValueType.
    emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyArgValueType,
                          getRuntimeMDValueType(T, BaseTypeName), 2);

    // Emit KeyArgAccQual.
    auto AccQual = cast<MDString>(F.getMetadata(
        "kernel_arg_access_qual")->getOperand(I))->getString();
    auto AQ = StringSwitch<RuntimeMD::KernelArg::AccessQualifer>(AccQual)
        .Case("read_only", RuntimeMD::KernelArg::ReadOnly)
        .Case("write_only", RuntimeMD::KernelArg::WriteOnly)
        .Case("read_write", RuntimeMD::KernelArg::ReadWrite)
        .Default(RuntimeMD::KernelArg::None);
    emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyArgAccQual, AQ, 1);

    // Emit KeyArgAddrQual.
    if (auto *PT = dyn_cast<PointerType>(T)) {
      emitRuntimeMDIntValue(*OutStreamer, RuntimeMD::KeyArgAddrQual,
                            PT->getAddressSpace(), 1);
    }

    // Emit KeyArgEnd.
    OutStreamer->EmitIntValue(RuntimeMD::KeyArgEnd, 1);
  }

  // Emit KeyReqdWorkGroupSize, KeyWorkGroupSizeHint, and KeyVecTypeHint.
  if (auto RWGS = F.getMetadata("reqd_work_group_size")) {
    emitRuntimeMDThreeIntValues(*OutStreamer, RuntimeMD::KeyReqdWorkGroupSize,
                                RWGS, 4);
  }

  if (auto WGSH = F.getMetadata("work_group_size_hint")) {
    emitRuntimeMDThreeIntValues(*OutStreamer, RuntimeMD::KeyWorkGroupSizeHint,
                                WGSH, 4);
  }

  if (auto VTH = F.getMetadata("vec_type_hint")) {
    auto TypeName = getOCLTypeName(cast<ValueAsMetadata>(
        VTH->getOperand(0))->getType(), mdconst::extract<ConstantInt>(
        VTH->getOperand(1))->getZExtValue());
    emitRuntimeMDStringValue(*OutStreamer, RuntimeMD::KeyVecTypeHint, TypeName);
  }

  // Emit KeyKernelEnd.
  OutStreamer->EmitIntValue(RuntimeMD::KeyKernelEnd, 1);
}