//===-- AMDGPUAsmPrinter.cpp - AMDGPU Assembly printer -------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// The AMDGPUAsmPrinter is used to print both assembly and binary code. When
/// passed an MCAsmStreamer it prints assembly, and when passed an
/// MCObjectStreamer it outputs binary code.
//
//===----------------------------------------------------------------------===//
//

#include "AMDGPUAsmPrinter.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "InstPrinter/AMDGPUInstPrinter.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "SIDefines.h"
#include "SIMachineFunctionInfo.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "AMDGPURuntimeMetadata.h"

using namespace ::AMDGPU;
using namespace llvm;

// TODO: This should get the default rounding mode from the kernel. We just set
// the default here, but this could change if the OpenCL rounding mode pragmas
// are used.
//
// The denormal mode here should match what is reported by the OpenCL runtime
// for the CL_FP_DENORM bit from CL_DEVICE_{HALF|SINGLE|DOUBLE}_FP_CONFIG, but
// can also be overridden to flush with the -cl-denorms-are-zero compiler flag.
//
// AMD OpenCL only sets flush none and reports CL_FP_DENORM for double
// precision, and leaves single precision to flush all and does not report
// CL_FP_DENORM for CL_DEVICE_SINGLE_FP_CONFIG. Mesa's OpenCL currently reports
// CL_FP_DENORM for both.
//
// FIXME: It seems some instructions do not support single precision denormals
// regardless of the mode (exp_*_f32, rcp_*_f32, rsq_*_f32, sqrt_f32, and
// sin_f32, cos_f32 on most parts).

// We want to use these instructions, and using fp32 denormals also causes
// instructions to run at the double precision rate for the device so it's
// probably best to just report no single precision denormals.
static uint32_t getFPMode(const MachineFunction &F) {
  const SISubtarget &ST = F.getSubtarget<SISubtarget>();
  // TODO: Is there any real use for the flush in only / flush out only modes?

  uint32_t FP32Denormals =
      ST.hasFP32Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

  uint32_t FP64Denormals =
      ST.hasFP64Denormals() ? FP_DENORM_FLUSH_NONE : FP_DENORM_FLUSH_IN_FLUSH_OUT;

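  // Pack the rounding and denormal controls into the FP_ROUND and FP_DENORM
  // fields of the returned float mode value.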
  return FP_ROUND_MODE_SP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEAREST) |
         FP_DENORM_MODE_SP(FP32Denormals) |
         FP_DENORM_MODE_DP(FP64Denormals);
}

static AsmPrinter *
createAMDGPUAsmPrinterPass(TargetMachine &tm,
                           std::unique_ptr<MCStreamer> &&Streamer) {
  return new AMDGPUAsmPrinter(tm, std::move(Streamer));
}

extern "C" void LLVMInitializeAMDGPUAsmPrinter() {
  TargetRegistry::RegisterAsmPrinter(TheAMDGPUTarget, createAMDGPUAsmPrinterPass);
  TargetRegistry::RegisterAsmPrinter(TheGCNTarget, createAMDGPUAsmPrinterPass);
}

AMDGPUAsmPrinter::AMDGPUAsmPrinter(TargetMachine &TM,
                                   std::unique_ptr<MCStreamer> Streamer)
  : AsmPrinter(TM, std::move(Streamer)) {}

const char *AMDGPUAsmPrinter::getPassName() const {
  return "AMDGPU Assembly Printer";
}

void AMDGPUAsmPrinter::EmitStartOfAsmFile(Module &M) {
  if (TM.getTargetTriple().getOS() != Triple::AMDHSA)
    return;

  // Need to construct an MCSubtargetInfo here in case we have no functions
  // in the module.
  std::unique_ptr<MCSubtargetInfo> STI(TM.getTarget().createMCSubtargetInfo(
      TM.getTargetTriple().str(), TM.getTargetCPU(),
      TM.getTargetFeatureString()));

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());

  TS->EmitDirectiveHSACodeObjectVersion(2, 1);

  AMDGPU::IsaVersion ISA = AMDGPU::getIsaVersion(STI->getFeatureBits());
  TS->EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor, ISA.Stepping,
                                    "AMD", "AMDGPU");
  emitStartOfRuntimeMetadata(M);
}

void AMDGPUAsmPrinter::EmitFunctionBodyStart() {
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  SIProgramInfo KernelInfo;
  if (STM.isAmdHsaOS()) {
    getSIProgramInfo(KernelInfo, *MF);
    EmitAmdKernelCodeT(*MF, KernelInfo);
  }
}

void AMDGPUAsmPrinter::EmitFunctionEntryLabel() {
  const SIMachineFunctionInfo *MFI = MF->getInfo<SIMachineFunctionInfo>();
  const AMDGPUSubtarget &STM = MF->getSubtarget<AMDGPUSubtarget>();
  if (MFI->isKernel() && STM.isAmdHsaOS()) {
    AMDGPUTargetStreamer *TS =
        static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());
    TS->EmitAMDGPUSymbolType(CurrentFnSym->getName(),
                             ELF::STT_AMDGPU_HSA_KERNEL);
  }

  AsmPrinter::EmitFunctionEntryLabel();
}

void AMDGPUAsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {

  // Group segment variables aren't emitted in HSA.
  if (AMDGPU::isGroupSegment(GV))
    return;

  AsmPrinter::EmitGlobalVariable(GV);
}

bool AMDGPUAsmPrinter::runOnMachineFunction(MachineFunction &MF) {

  // The starting address of all shader programs must be aligned to 256 bytes.
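  // setAlignment() takes a log2 value, so 8 corresponds to 1 << 8 = 256 bytes.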
  MF.setAlignment(8);

  SetupMachineFunction(MF);

  MCContext &Context = getObjFileLowering().getContext();
  MCSectionELF *ConfigSection =
      Context.getELFSection(".AMDGPU.config", ELF::SHT_PROGBITS, 0);
  OutStreamer->SwitchSection(ConfigSection);

  const AMDGPUSubtarget &STM = MF.getSubtarget<AMDGPUSubtarget>();
  SIProgramInfo KernelInfo;
  if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
    getSIProgramInfo(KernelInfo, MF);
    if (!STM.isAmdHsaOS()) {
      EmitProgramInfoSI(MF, KernelInfo);
    }
  } else {
    EmitProgramInfoR600(MF);
  }

  DisasmLines.clear();
  HexLines.clear();
  DisasmLineMaxLen = 0;

  EmitFunctionBody();

  if (isVerbose()) {
    MCSectionELF *CommentSection =
        Context.getELFSection(".AMDGPU.csdata", ELF::SHT_PROGBITS, 0);
    OutStreamer->SwitchSection(CommentSection);

    if (STM.getGeneration() >= AMDGPUSubtarget::SOUTHERN_ISLANDS) {
      OutStreamer->emitRawComment(" Kernel info:", false);
      OutStreamer->emitRawComment(" codeLenInByte = " + Twine(KernelInfo.CodeLen),
                                  false);
      OutStreamer->emitRawComment(" NumSgprs: " + Twine(KernelInfo.NumSGPR),
                                  false);
      OutStreamer->emitRawComment(" NumVgprs: " + Twine(KernelInfo.NumVGPR),
                                  false);
      OutStreamer->emitRawComment(" FloatMode: " + Twine(KernelInfo.FloatMode),
                                  false);
      OutStreamer->emitRawComment(" IeeeMode: " + Twine(KernelInfo.IEEEMode),
                                  false);
      OutStreamer->emitRawComment(" ScratchSize: " + Twine(KernelInfo.ScratchSize),
                                  false);
      OutStreamer->emitRawComment(" LDSByteSize: " + Twine(KernelInfo.LDSSize) +
                                  " bytes/workgroup (compile time only)", false);

      OutStreamer->emitRawComment(" ReservedVGPRFirst: " + Twine(KernelInfo.ReservedVGPRFirst),
                                  false);
      OutStreamer->emitRawComment(" ReservedVGPRCount: " + Twine(KernelInfo.ReservedVGPRCount),
                                  false);

      if (MF.getSubtarget<SISubtarget>().debuggerEmitPrologue()) {
        OutStreamer->emitRawComment(" DebuggerWavefrontPrivateSegmentOffsetSGPR: s" +
                                    Twine(KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR), false);
        OutStreamer->emitRawComment(" DebuggerPrivateSegmentBufferSGPR: s" +
                                    Twine(KernelInfo.DebuggerPrivateSegmentBufferSGPR), false);
      }

      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:USER_SGPR: " +
                                  Twine(G_00B84C_USER_SGPR(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_X_EN: " +
                                  Twine(G_00B84C_TGID_X_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Y_EN: " +
                                  Twine(G_00B84C_TGID_Y_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TGID_Z_EN: " +
                                  Twine(G_00B84C_TGID_Z_EN(KernelInfo.ComputePGMRSrc2)),
                                  false);
      OutStreamer->emitRawComment(" COMPUTE_PGM_RSRC2:TIDIG_COMP_CNT: " +
                                  Twine(G_00B84C_TIDIG_COMP_CNT(KernelInfo.ComputePGMRSrc2)),
                                  false);

    } else {
      R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();
      OutStreamer->emitRawComment(
          Twine("SQ_PGM_RESOURCES:STACK_SIZE = " + Twine(MFI->CFStackSize)));
    }
  }

  if (STM.dumpCode()) {

    OutStreamer->SwitchSection(
        Context.getELFSection(".AMDGPU.disasm", ELF::SHT_NOTE, 0));

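    // Pad each disassembled line to a common width and append its raw
    // encoding as a trailing comment.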
    for (size_t i = 0; i < DisasmLines.size(); ++i) {
      std::string Comment(DisasmLineMaxLen - DisasmLines[i].size(), ' ');
      Comment += " ; " + HexLines[i] + "\n";

      OutStreamer->EmitBytes(StringRef(DisasmLines[i]));
      OutStreamer->EmitBytes(StringRef(Comment));
    }
  }

  emitRuntimeMetadata(*MF.getFunction());

  return false;
}

void AMDGPUAsmPrinter::EmitProgramInfoR600(const MachineFunction &MF) {
  unsigned MaxGPR = 0;
  bool killPixel = false;
  const R600Subtarget &STM = MF.getSubtarget<R600Subtarget>();
  const R600RegisterInfo *RI = STM.getRegisterInfo();
  const R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

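  // Scan every register operand to find the highest hardware GPR index used,
  // and record whether the shader contains a KILLGT instruction.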
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      if (MI.getOpcode() == AMDGPU::KILLGT)
        killPixel = true;
      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        if (!MO.isReg())
          continue;
        unsigned HWReg = RI->getEncodingValue(MO.getReg()) & 0xff;

        // Registers with an encoding value > 127 aren't GPRs.
        if (HWReg > 127)
          continue;
        MaxGPR = std::max(MaxGPR, HWReg);
      }
    }
  }

  unsigned RsrcReg;
  if (STM.getGeneration() >= R600Subtarget::EVERGREEN) {
    // Evergreen / Northern Islands
    switch (MF.getFunction()->getCallingConv()) {
    default: // Fall through
    case CallingConv::AMDGPU_CS: RsrcReg = R_0288D4_SQ_PGM_RESOURCES_LS; break;
    case CallingConv::AMDGPU_GS: RsrcReg = R_028878_SQ_PGM_RESOURCES_GS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028844_SQ_PGM_RESOURCES_PS; break;
    case CallingConv::AMDGPU_VS: RsrcReg = R_028860_SQ_PGM_RESOURCES_VS; break;
    }
  } else {
    // R600 / R700
    switch (MF.getFunction()->getCallingConv()) {
    default: // Fall through
    case CallingConv::AMDGPU_GS: // Fall through
    case CallingConv::AMDGPU_CS: // Fall through
    case CallingConv::AMDGPU_VS: RsrcReg = R_028868_SQ_PGM_RESOURCES_VS; break;
    case CallingConv::AMDGPU_PS: RsrcReg = R_028850_SQ_PGM_RESOURCES_PS; break;
    }
  }

  OutStreamer->EmitIntValue(RsrcReg, 4);
  OutStreamer->EmitIntValue(S_NUM_GPRS(MaxGPR + 1) |
                            S_STACK_SIZE(MFI->CFStackSize), 4);
  OutStreamer->EmitIntValue(R_02880C_DB_SHADER_CONTROL, 4);
  OutStreamer->EmitIntValue(S_02880C_KILL_ENABLE(killPixel), 4);

  if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
    OutStreamer->EmitIntValue(R_0288E8_SQ_LDS_ALLOC, 4);
    OutStreamer->EmitIntValue(alignTo(MFI->getLDSSize(), 4) >> 2, 4);
  }
}

void AMDGPUAsmPrinter::getSIProgramInfo(SIProgramInfo &ProgInfo,
                                        const MachineFunction &MF) const {
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  uint64_t CodeSize = 0;
  unsigned MaxSGPR = 0;
  unsigned MaxVGPR = 0;
  bool VCCUsed = false;
  bool FlatUsed = false;
  const SIRegisterInfo *RI = STM.getRegisterInfo();
  const SIInstrInfo *TII = STM.getInstrInfo();

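  // Walk every instruction to accumulate the code size and the highest SGPR
  // and VGPR hardware indices referenced by any operand.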
  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      // TODO: CodeSize should account for multiple functions.

      // TODO: Should we count size of debug info?
      if (MI.isDebugValue())
        continue;

      CodeSize += TII->getInstSizeInBytes(MI);

      unsigned numOperands = MI.getNumOperands();
      for (unsigned op_idx = 0; op_idx < numOperands; op_idx++) {
        const MachineOperand &MO = MI.getOperand(op_idx);
        unsigned width = 0;
        bool isSGPR = false;

        if (!MO.isReg())
          continue;

        unsigned reg = MO.getReg();
        switch (reg) {
        case AMDGPU::EXEC:
        case AMDGPU::EXEC_LO:
        case AMDGPU::EXEC_HI:
        case AMDGPU::SCC:
        case AMDGPU::M0:
          continue;

        case AMDGPU::VCC:
        case AMDGPU::VCC_LO:
        case AMDGPU::VCC_HI:
          VCCUsed = true;
          continue;

        case AMDGPU::FLAT_SCR:
        case AMDGPU::FLAT_SCR_LO:
        case AMDGPU::FLAT_SCR_HI:
          FlatUsed = true;
          continue;

        case AMDGPU::TBA:
        case AMDGPU::TBA_LO:
        case AMDGPU::TBA_HI:
        case AMDGPU::TMA:
        case AMDGPU::TMA_LO:
        case AMDGPU::TMA_HI:
          llvm_unreachable("Trap Handler registers should not be used");
          continue;

        default:
          break;
        }

        if (AMDGPU::SReg_32RegClass.contains(reg)) {
          if (AMDGPU::TTMP_32RegClass.contains(reg)) {
            llvm_unreachable("Trap Handler registers should not be used");
          }
          isSGPR = true;
          width = 1;
        } else if (AMDGPU::VGPR_32RegClass.contains(reg)) {
          isSGPR = false;
          width = 1;
        } else if (AMDGPU::SReg_64RegClass.contains(reg)) {
          if (AMDGPU::TTMP_64RegClass.contains(reg)) {
            llvm_unreachable("Trap Handler registers should not be used");
          }
          isSGPR = true;
          width = 2;
        } else if (AMDGPU::VReg_64RegClass.contains(reg)) {
          isSGPR = false;
          width = 2;
        } else if (AMDGPU::VReg_96RegClass.contains(reg)) {
          isSGPR = false;
          width = 3;
        } else if (AMDGPU::SReg_128RegClass.contains(reg)) {
          isSGPR = true;
          width = 4;
        } else if (AMDGPU::VReg_128RegClass.contains(reg)) {
          isSGPR = false;
          width = 4;
        } else if (AMDGPU::SReg_256RegClass.contains(reg)) {
          isSGPR = true;
          width = 8;
        } else if (AMDGPU::VReg_256RegClass.contains(reg)) {
          isSGPR = false;
          width = 8;
        } else if (AMDGPU::SReg_512RegClass.contains(reg)) {
          isSGPR = true;
          width = 16;
        } else if (AMDGPU::VReg_512RegClass.contains(reg)) {
          isSGPR = false;
          width = 16;
        } else {
          llvm_unreachable("Unknown register class");
        }
        unsigned hwReg = RI->getEncodingValue(reg) & 0xff;
        unsigned maxUsed = hwReg + width - 1;
        if (isSGPR) {
          MaxSGPR = maxUsed > MaxSGPR ? maxUsed : MaxSGPR;
        } else {
          MaxVGPR = maxUsed > MaxVGPR ? maxUsed : MaxVGPR;
        }
      }
    }
  }

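  // Extra SGPRs are reserved for the implicit uses of VCC and, depending on
  // the subtarget and whether flat instructions appear, FLAT_SCRATCH and the
  // XNACK mask; they are counted on top of the highest explicitly used SGPR.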
  unsigned ExtraSGPRs = 0;

  if (VCCUsed)
    ExtraSGPRs = 2;

  if (STM.getGeneration() < SISubtarget::VOLCANIC_ISLANDS) {
    if (FlatUsed)
      ExtraSGPRs = 4;
  } else {
    if (STM.isXNACKEnabled())
      ExtraSGPRs = 4;

    if (FlatUsed)
      ExtraSGPRs = 6;
  }

  MaxSGPR += ExtraSGPRs;

  // Record first reserved register and reserved register count fields, and
  // update max register counts if "amdgpu-debugger-reserve-regs" attribute was
  // specified.
  if (STM.debuggerReserveRegs()) {
    ProgInfo.ReservedVGPRFirst = MaxVGPR + 1;
    ProgInfo.ReservedVGPRCount = MFI->getDebuggerReservedVGPRCount();
    MaxVGPR += MFI->getDebuggerReservedVGPRCount();
  }

  // Update DebuggerWavefrontPrivateSegmentOffsetSGPR and
  // DebuggerPrivateSegmentBufferSGPR fields if "amdgpu-debugger-emit-prologue"
  // attribute was specified.
  if (STM.debuggerEmitPrologue()) {
    ProgInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR =
      RI->getHWRegIndex(MFI->getScratchWaveOffsetReg());
    ProgInfo.DebuggerPrivateSegmentBufferSGPR =
      RI->getHWRegIndex(MFI->getScratchRSrcReg());
  }

  // We found the maximum register index. They start at 0, so add one to get
  // the number of registers.
  ProgInfo.NumVGPR = MaxVGPR + 1;
  ProgInfo.NumSGPR = MaxSGPR + 1;

  if (STM.hasSGPRInitBug()) {
    if (ProgInfo.NumSGPR > SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG) {
      LLVMContext &Ctx = MF.getFunction()->getContext();
      DiagnosticInfoResourceLimit Diag(*MF.getFunction(),
                                       "SGPRs with SGPR init bug",
                                       ProgInfo.NumSGPR, DS_Error);
      Ctx.diagnose(Diag);
    }

    ProgInfo.NumSGPR = SISubtarget::FIXED_SGPR_COUNT_FOR_INIT_BUG;
  }

  if (MFI->NumUserSGPRs > STM.getMaxNumUserSGPRs()) {
    LLVMContext &Ctx = MF.getFunction()->getContext();
    DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "user SGPRs",
                                     MFI->NumUserSGPRs, DS_Error);
    Ctx.diagnose(Diag);
  }

  if (MFI->getLDSSize() > static_cast<unsigned>(STM.getLocalMemorySize())) {
    LLVMContext &Ctx = MF.getFunction()->getContext();
    DiagnosticInfoResourceLimit Diag(*MF.getFunction(), "local memory",
                                     MFI->getLDSSize(), DS_Error);
    Ctx.diagnose(Diag);
  }

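  // The *Blocks fields encode (granule count - 1): VGPRs are reported in
  // granules of 4 and SGPRs in granules of 8.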
  ProgInfo.VGPRBlocks = (ProgInfo.NumVGPR - 1) / 4;
  ProgInfo.SGPRBlocks = (ProgInfo.NumSGPR - 1) / 8;
  // Set the value to initialize FP_ROUND and FP_DENORM parts of the mode
  // register.
  ProgInfo.FloatMode = getFPMode(MF);

  ProgInfo.IEEEMode = 0;

  // Make the clamp modifier on NaN inputs return 0.
  ProgInfo.DX10Clamp = 1;

  const MachineFrameInfo *FrameInfo = MF.getFrameInfo();
  ProgInfo.ScratchSize = FrameInfo->getStackSize();

  ProgInfo.FlatUsed = FlatUsed;
  ProgInfo.VCCUsed = VCCUsed;
  ProgInfo.CodeLen = CodeSize;

  unsigned LDSAlignShift;
  if (STM.getGeneration() < SISubtarget::SEA_ISLANDS) {
    // LDS is allocated in 64 dword blocks.
    LDSAlignShift = 8;
  } else {
    // LDS is allocated in 128 dword blocks.
    LDSAlignShift = 9;
  }

  unsigned LDSSpillSize = MFI->LDSWaveSpillSize *
                          MFI->getMaximumWorkGroupSize(MF);

  ProgInfo.LDSSize = MFI->getLDSSize() + LDSSpillSize;
  ProgInfo.LDSBlocks =
      alignTo(ProgInfo.LDSSize, 1ULL << LDSAlignShift) >> LDSAlignShift;

  // Scratch is allocated in 256 dword blocks.
  unsigned ScratchAlignShift = 10;
  // We need to program the hardware with the amount of scratch memory that
  // is used by the entire wave. ProgInfo.ScratchSize is the amount of
  // scratch memory used per thread.
  ProgInfo.ScratchBlocks =
      alignTo(ProgInfo.ScratchSize * STM.getWavefrontSize(),
              1ULL << ScratchAlignShift) >>
      ScratchAlignShift;

  ProgInfo.ComputePGMRSrc1 =
      S_00B848_VGPRS(ProgInfo.VGPRBlocks) |
      S_00B848_SGPRS(ProgInfo.SGPRBlocks) |
      S_00B848_PRIORITY(ProgInfo.Priority) |
      S_00B848_FLOAT_MODE(ProgInfo.FloatMode) |
      S_00B848_PRIV(ProgInfo.Priv) |
      S_00B848_DX10_CLAMP(ProgInfo.DX10Clamp) |
      S_00B848_DEBUG_MODE(ProgInfo.DebugMode) |
      S_00B848_IEEE_MODE(ProgInfo.IEEEMode);

  // 0 = X, 1 = XY, 2 = XYZ
  unsigned TIDIGCompCnt = 0;
  if (MFI->hasWorkItemIDZ())
    TIDIGCompCnt = 2;
  else if (MFI->hasWorkItemIDY())
    TIDIGCompCnt = 1;

  ProgInfo.ComputePGMRSrc2 =
      S_00B84C_SCRATCH_EN(ProgInfo.ScratchBlocks > 0) |
      S_00B84C_USER_SGPR(MFI->getNumUserSGPRs()) |
      S_00B84C_TGID_X_EN(MFI->hasWorkGroupIDX()) |
      S_00B84C_TGID_Y_EN(MFI->hasWorkGroupIDY()) |
      S_00B84C_TGID_Z_EN(MFI->hasWorkGroupIDZ()) |
      S_00B84C_TG_SIZE_EN(MFI->hasWorkGroupInfo()) |
      S_00B84C_TIDIG_COMP_CNT(TIDIGCompCnt) |
      S_00B84C_EXCP_EN_MSB(0) |
      S_00B84C_LDS_SIZE(ProgInfo.LDSBlocks) |
      S_00B84C_EXCP_EN(0);
}

static unsigned getRsrcReg(CallingConv::ID CallConv) {
  switch (CallConv) {
  default: // Fall through
  case CallingConv::AMDGPU_CS: return R_00B848_COMPUTE_PGM_RSRC1;
  case CallingConv::AMDGPU_GS: return R_00B228_SPI_SHADER_PGM_RSRC1_GS;
  case CallingConv::AMDGPU_PS: return R_00B028_SPI_SHADER_PGM_RSRC1_PS;
  case CallingConv::AMDGPU_VS: return R_00B128_SPI_SHADER_PGM_RSRC1_VS;
  }
}

void AMDGPUAsmPrinter::EmitProgramInfoSI(const MachineFunction &MF,
                                         const SIProgramInfo &KernelInfo) {
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  unsigned RsrcReg = getRsrcReg(MF.getFunction()->getCallingConv());

  if (AMDGPU::isCompute(MF.getFunction()->getCallingConv())) {
    OutStreamer->EmitIntValue(R_00B848_COMPUTE_PGM_RSRC1, 4);

    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc1, 4);

    OutStreamer->EmitIntValue(R_00B84C_COMPUTE_PGM_RSRC2, 4);
    OutStreamer->EmitIntValue(KernelInfo.ComputePGMRSrc2, 4);

    OutStreamer->EmitIntValue(R_00B860_COMPUTE_TMPRING_SIZE, 4);
    OutStreamer->EmitIntValue(S_00B860_WAVESIZE(KernelInfo.ScratchBlocks), 4);

    // TODO: Should probably note flat usage somewhere. SC emits a "FlatPtr32 =
    // 0" comment but I don't see a corresponding field in the register spec.
  } else {
    OutStreamer->EmitIntValue(RsrcReg, 4);
    OutStreamer->EmitIntValue(S_00B028_VGPRS(KernelInfo.VGPRBlocks) |
                              S_00B028_SGPRS(KernelInfo.SGPRBlocks), 4);
    if (STM.isVGPRSpillingEnabled(*MF.getFunction())) {
      OutStreamer->EmitIntValue(R_0286E8_SPI_TMPRING_SIZE, 4);
      OutStreamer->EmitIntValue(S_0286E8_WAVESIZE(KernelInfo.ScratchBlocks), 4);
    }
  }

  if (MF.getFunction()->getCallingConv() == CallingConv::AMDGPU_PS) {
    OutStreamer->EmitIntValue(R_00B02C_SPI_SHADER_PGM_RSRC2_PS, 4);
    OutStreamer->EmitIntValue(S_00B02C_EXTRA_LDS_SIZE(KernelInfo.LDSBlocks), 4);
    OutStreamer->EmitIntValue(R_0286CC_SPI_PS_INPUT_ENA, 4);
    OutStreamer->EmitIntValue(MFI->PSInputEna, 4);
    OutStreamer->EmitIntValue(R_0286D0_SPI_PS_INPUT_ADDR, 4);
    OutStreamer->EmitIntValue(MFI->getPSInputAddr(), 4);
  }

  OutStreamer->EmitIntValue(R_SPILLED_SGPRS, 4);
  OutStreamer->EmitIntValue(MFI->getNumSpilledSGPRs(), 4);
  OutStreamer->EmitIntValue(R_SPILLED_VGPRS, 4);
  OutStreamer->EmitIntValue(MFI->getNumSpilledVGPRs(), 4);
}

// This is supposed to be log2(Size).
static amd_element_byte_size_t getElementByteSizeValue(unsigned Size) {
  switch (Size) {
  case 4:
    return AMD_ELEMENT_4_BYTES;
  case 8:
    return AMD_ELEMENT_8_BYTES;
  case 16:
    return AMD_ELEMENT_16_BYTES;
  default:
    llvm_unreachable("invalid private_element_size");
  }
}

void AMDGPUAsmPrinter::EmitAmdKernelCodeT(const MachineFunction &MF,
                                          const SIProgramInfo &KernelInfo) const {
  const SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  const SISubtarget &STM = MF.getSubtarget<SISubtarget>();
  amd_kernel_code_t header;

  AMDGPU::initDefaultAMDKernelCodeT(header, STM.getFeatureBits());

  header.compute_pgm_resource_registers =
      KernelInfo.ComputePGMRSrc1 |
      (KernelInfo.ComputePGMRSrc2 << 32);
  header.code_properties = AMD_CODE_PROPERTY_IS_PTR64;

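  // Record the private (scratch) element size in the code properties;
  // getElementByteSizeValue() maps the byte size to the corresponding
  // AMD_ELEMENT_* encoding.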
  AMD_HSA_BITS_SET(header.code_properties,
                   AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE,
                   getElementByteSizeValue(STM.getMaxPrivateElementSize()));

  if (MFI->hasPrivateSegmentBuffer()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER;
  }

  if (MFI->hasDispatchPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (MFI->hasQueuePtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR;

  if (MFI->hasKernargSegmentPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR;

  if (MFI->hasDispatchID())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID;

  if (MFI->hasFlatScratchInit())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT;

  // TODO: Private segment size

  if (MFI->hasGridWorkgroupCountX()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X;
  }

  if (MFI->hasGridWorkgroupCountY()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y;
  }

  if (MFI->hasGridWorkgroupCountZ()) {
    header.code_properties |=
      AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z;
  }

  if (MFI->hasDispatchPtr())
    header.code_properties |= AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR;

  if (STM.debuggerSupported())
    header.code_properties |= AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED;

  if (STM.isXNACKEnabled())
    header.code_properties |= AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED;

  // FIXME: Should use getKernArgSize
  header.kernarg_segment_byte_size = MFI->getABIArgOffset();
  header.wavefront_sgpr_count = KernelInfo.NumSGPR;
  header.workitem_vgpr_count = KernelInfo.NumVGPR;
  header.workitem_private_segment_byte_size = KernelInfo.ScratchSize;
  header.workgroup_group_segment_byte_size = KernelInfo.LDSSize;
  header.reserved_vgpr_first = KernelInfo.ReservedVGPRFirst;
  header.reserved_vgpr_count = KernelInfo.ReservedVGPRCount;

  if (STM.debuggerEmitPrologue()) {
    header.debug_wavefront_private_segment_offset_sgpr =
      KernelInfo.DebuggerWavefrontPrivateSegmentOffsetSGPR;
    header.debug_private_segment_buffer_sgpr =
      KernelInfo.DebuggerPrivateSegmentBufferSGPR;
  }

  AMDGPUTargetStreamer *TS =
      static_cast<AMDGPUTargetStreamer *>(OutStreamer->getTargetStreamer());

  OutStreamer->SwitchSection(getObjFileLowering().getTextSection());
  TS->EmitAMDKernelCodeT(header);
}

bool AMDGPUAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
                                       unsigned AsmVariant,
                                       const char *ExtraCode, raw_ostream &O) {
  if (ExtraCode && ExtraCode[0]) {
    if (ExtraCode[1] != 0)
      return true; // Unknown modifier.

    switch (ExtraCode[0]) {
    default:
      // See if this is a generic print operand
      return AsmPrinter::PrintAsmOperand(MI, OpNo, AsmVariant, ExtraCode, O);
    case 'r':
      break;
    }
  }

  AMDGPUInstPrinter::printRegOperand(MI->getOperand(OpNo).getReg(), O,
                   *TM.getSubtargetImpl(*MF->getFunction())->getRegisterInfo());
  return false;
}

// Emit a key and an integer value for runtime metadata.
static void emitRuntimeMDIntValue(std::unique_ptr<MCStreamer> &Streamer,
                                  RuntimeMD::Key K, uint64_t V,
                                  unsigned Size) {
  Streamer->EmitIntValue(K, 1);
  Streamer->EmitIntValue(V, Size);
}

// Emit a key and a string value for runtime metadata.
static void emitRuntimeMDStringValue(std::unique_ptr<MCStreamer> &Streamer,
                                     RuntimeMD::Key K, StringRef S) {
  Streamer->EmitIntValue(K, 1);
  Streamer->EmitIntValue(S.size(), 4);
  Streamer->EmitBytes(S);
}

// Emit a key and three integer values for runtime metadata.
// The three integer values are obtained from MDNode \p Node.
static void emitRuntimeMDThreeIntValues(std::unique_ptr<MCStreamer> &Streamer,
                                        RuntimeMD::Key K, MDNode *Node,
                                        unsigned Size) {
  Streamer->EmitIntValue(K, 1);
  Streamer->EmitIntValue(mdconst::extract<ConstantInt>(
    Node->getOperand(0))->getZExtValue(), Size);
  Streamer->EmitIntValue(mdconst::extract<ConstantInt>(
    Node->getOperand(1))->getZExtValue(), Size);
  Streamer->EmitIntValue(mdconst::extract<ConstantInt>(
    Node->getOperand(2))->getZExtValue(), Size);
}

void AMDGPUAsmPrinter::emitStartOfRuntimeMetadata(const Module &M) {
  OutStreamer->SwitchSection(getObjFileLowering().getContext()
    .getELFSection(RuntimeMD::SectionName, ELF::SHT_PROGBITS, 0));

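  // The metadata version word packs the major version in the high byte and
  // the revision in the low byte.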
  emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyMDVersion,
                        RuntimeMD::MDVersion << 8 | RuntimeMD::MDRevision, 2);
  if (auto MD = M.getNamedMetadata("opencl.ocl.version")) {
    if (MD->getNumOperands()) {
      auto Node = MD->getOperand(0);
      if (Node->getNumOperands() > 1) {
        emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyLanguage,
                              RuntimeMD::OpenCL_C, 1);
        uint16_t Major = mdconst::extract<ConstantInt>(Node->getOperand(0))
                           ->getZExtValue();
        uint16_t Minor = mdconst::extract<ConstantInt>(Node->getOperand(1))
                           ->getZExtValue();
        emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyLanguageVersion,
                              Major * 100 + Minor * 10, 2);
      }
    }
  }
}

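// Map an LLVM IR type to the OpenCL C type name used in the runtime metadata;
// \p isSigned selects between the signed and unsigned integer spellings.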
static std::string getOCLTypeName(Type *Ty, bool isSigned) {
  if (VectorType *VecTy = dyn_cast<VectorType>(Ty)) {
    Type *EleTy = VecTy->getElementType();
    unsigned Size = VecTy->getVectorNumElements();
    return (Twine(getOCLTypeName(EleTy, isSigned)) + Twine(Size)).str();
  }
  switch (Ty->getTypeID()) {
  case Type::HalfTyID: return "half";
  case Type::FloatTyID: return "float";
  case Type::DoubleTyID: return "double";
  case Type::IntegerTyID: {
    if (!isSigned)
      return (Twine('u') + Twine(getOCLTypeName(Ty, true))).str();
    auto IntTy = cast<IntegerType>(Ty);
    auto BW = IntTy->getIntegerBitWidth();
    switch (BW) {
    case 8:
      return "char";
    case 16:
      return "short";
    case 32:
      return "int";
    case 64:
      return "long";
    default:
      return (Twine('i') + Twine(BW)).str();
    }
  }
  default:
    llvm_unreachable("invalid type");
  }
}

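// Translate an LLVM IR type (looking through vectors and pointers) and its
// OpenCL type name into the runtime metadata value type; a leading 'u' in
// \p TypeName marks an unsigned integer.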
static RuntimeMD::KernelArg::ValueType getRuntimeMDValueType(
    Type *Ty, StringRef TypeName) {
  if (auto VT = dyn_cast<VectorType>(Ty))
    return getRuntimeMDValueType(VT->getElementType(), TypeName);
  else if (auto PT = dyn_cast<PointerType>(Ty))
    return getRuntimeMDValueType(PT->getElementType(), TypeName);
  else if (Ty->isHalfTy())
    return RuntimeMD::KernelArg::F16;
  else if (Ty->isFloatTy())
    return RuntimeMD::KernelArg::F32;
  else if (Ty->isDoubleTy())
    return RuntimeMD::KernelArg::F64;
  else if (IntegerType *IntTy = dyn_cast<IntegerType>(Ty)) {
    bool Signed = !TypeName.startswith("u");
    switch (IntTy->getIntegerBitWidth()) {
    case 8:
      return Signed ? RuntimeMD::KernelArg::I8 : RuntimeMD::KernelArg::U8;
    case 16:
      return Signed ? RuntimeMD::KernelArg::I16 : RuntimeMD::KernelArg::U16;
    case 32:
      return Signed ? RuntimeMD::KernelArg::I32 : RuntimeMD::KernelArg::U32;
    case 64:
      return Signed ? RuntimeMD::KernelArg::I64 : RuntimeMD::KernelArg::U64;
    default:
      // Runtime does not recognize other integer types. Report as
      // struct type.
      return RuntimeMD::KernelArg::Struct;
    }
  } else
    return RuntimeMD::KernelArg::Struct;
}

872void AMDGPUAsmPrinter::emitRuntimeMetadata(const Function &F) {
873 if (!F.getMetadata("kernel_arg_type"))
874 return;
875
876 MCContext &Context = getObjFileLowering().getContext();
877 OutStreamer->SwitchSection(
878 Context.getELFSection(RuntimeMD::SectionName, ELF::SHT_PROGBITS, 0));
879 OutStreamer->EmitIntValue(RuntimeMD::KeyKernelBegin, 1);
880 emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyKernelName, F.getName());
881
882 for (auto &Arg:F.args()) {
883 // Emit KeyArgBegin.
884 unsigned I = Arg.getArgNo();
885 OutStreamer->EmitIntValue(RuntimeMD::KeyArgBegin, 1);
886
887 // Emit KeyArgSize and KeyArgAlign.
888 auto T = Arg.getType();
889 auto DL = F.getParent()->getDataLayout();
890 emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgSize,
891 DL.getTypeAllocSize(T), 4);
892 emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgAlign,
893 DL.getABITypeAlignment(T), 4);
894
895 // Emit KeyArgTypeName.
896 auto TypeName = dyn_cast<MDString>(F.getMetadata(
897 "kernel_arg_type")->getOperand(I))->getString();
898 emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyArgTypeName, TypeName);
899
900 // Emit KeyArgName.
901 if (auto ArgNameMD = F.getMetadata("kernel_arg_name")) {
902 auto ArgName = cast<MDString>(ArgNameMD->getOperand(
903 I))->getString();
904 emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyArgName, ArgName);
905 }
906
907 // Emit KeyArgIsVolatile, KeyArgIsRestrict, KeyArgIsConst and KeyArgIsPipe.
908 auto TypeQual = cast<MDString>(F.getMetadata(
909 "kernel_arg_type_qual")->getOperand(I))->getString();
910 SmallVector<StringRef, 1> SplitQ;
911 TypeQual.split(SplitQ, " ", -1, false/* drop empty entry*/);
912 for (auto &I:SplitQ) {
913 auto Key = StringSwitch<RuntimeMD::Key>(I)
914 .Case("volatile", RuntimeMD::KeyArgIsVolatile)
915 .Case("restrict", RuntimeMD::KeyArgIsRestrict)
916 .Case("const", RuntimeMD::KeyArgIsConst)
917 .Case("pipe", RuntimeMD::KeyArgIsPipe)
918 .Default(RuntimeMD::KeyNull);
919 OutStreamer->EmitIntValue(Key, 1);
920 }
921
922 // Emit KeyArgTypeKind.
923 auto BaseTypeName = cast<MDString>(
924 F.getMetadata("kernel_arg_base_type")->getOperand(I))->getString();
925 auto TypeKind = StringSwitch<RuntimeMD::KernelArg::TypeKind>(BaseTypeName)
926 .Case("sampler_t", RuntimeMD::KernelArg::Sampler)
927 .Case("queue_t", RuntimeMD::KernelArg::Queue)
928 .Cases("image1d_t", "image1d_array_t", "image1d_buffer_t",
929 "image2d_t" , "image2d_array_t", RuntimeMD::KernelArg::Image)
930 .Cases("image2d_depth_t", "image2d_array_depth_t",
931 "image2d_msaa_t", "image2d_array_msaa_t",
932 "image2d_msaa_depth_t", RuntimeMD::KernelArg::Image)
933 .Cases("image2d_array_msaa_depth_t", "image3d_t",
934 RuntimeMD::KernelArg::Image)
935 .Default(isa<PointerType>(T) ? RuntimeMD::KernelArg::Pointer :
936 RuntimeMD::KernelArg::Value);
937 emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgTypeKind, TypeKind, 1);
938
939 // Emit KeyArgValueType.
940 emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgValueType,
941 getRuntimeMDValueType(T, BaseTypeName), 2);
942
943 // Emit KeyArgAccQual.
944 auto AccQual = cast<MDString>(F.getMetadata(
945 "kernel_arg_access_qual")->getOperand(I))->getString();
946 auto AQ = StringSwitch<RuntimeMD::KernelArg::AccessQualifer>(AccQual)
947 .Case("read_only", RuntimeMD::KernelArg::ReadOnly)
948 .Case("write_only", RuntimeMD::KernelArg::WriteOnly)
949 .Case("read_write", RuntimeMD::KernelArg::ReadWrite)
950 .Default(RuntimeMD::KernelArg::None);
951 emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgAccQual,
952 AQ, 1);
953
954 // Emit KeyArgAddrQual.
955 if (isa<PointerType>(T))
956 emitRuntimeMDIntValue(OutStreamer, RuntimeMD::KeyArgAddrQual,
957 T->getPointerAddressSpace(), 1);
958
959 // Emit KeyArgEnd
960 OutStreamer->EmitIntValue(RuntimeMD::KeyArgEnd, 1);
961 }
962
963 // Emit KeyReqdWorkGroupSize, KeyWorkGroupSizeHint, and KeyVecTypeHint.
964 if (auto RWGS = F.getMetadata("reqd_work_group_size"))
965 emitRuntimeMDThreeIntValues(OutStreamer, RuntimeMD::KeyReqdWorkGroupSize,
966 RWGS, 4);
967 if (auto WGSH = F.getMetadata("work_group_size_hint"))
968 emitRuntimeMDThreeIntValues(OutStreamer, RuntimeMD::KeyWorkGroupSizeHint,
969 WGSH, 4);
970 if (auto VTH = F.getMetadata("vec_type_hint")) {
971 auto TypeName = getOCLTypeName(cast<ValueAsMetadata>(
972 VTH->getOperand(0))->getType(), mdconst::extract<ConstantInt>(
973 VTH->getOperand(1))->getZExtValue());
974 emitRuntimeMDStringValue(OutStreamer, RuntimeMD::KeyVecTypeHint,
975 TypeName);
976 }
977
978 // Emit KeyKernelEnd
979 OutStreamer->EmitIntValue(RuntimeMD::KeyKernelEnd, 1);
980}