//===-- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"

#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/TargetRegistry.h"

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

typedef llvm::MCDisassembler::DecodeStatus DecodeStatus;


inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

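// Branch offsets in SOPP instructions are signed dword counts measured from
// the instruction that follows the branch, hence the
// sext(Imm * 4) + 4 + Addr computation below.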
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

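// Helper macros for the static decoder callbacks referenced from the
// TableGen'erated decoder tables; each callback simply forwards the raw
// encoding value to the corresponding AMDGPUDisassembler method.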
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                      unsigned Imm, \
                                      uint64_t /*Addr*/, \
                                      const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)


static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA9(DecName) \
DECODE_OPERAND(decodeSDWA9##DecName, decodeSDWA9##DecName)

DECODE_SDWA9(Src32)
DECODE_SDWA9(Src16)
DECODE_SDWA9(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

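// Read the next sizeof(T) bytes from the front of Bytes as a little-endian
// value and advance the view past them.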
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

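// Try to decode a single instruction against one decoder table. On failure the
// Bytes view is restored, so any literal constant consumed during the failed
// attempt does not leak into the next attempt.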
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;

  // ToDo: AMDGPUDisassembler supports only the VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to switch on the encoding length using some bit
    // predicate, but the predicate is not known yet, so try every table we
    // have.

    // Try to decode DPP and SDWA first to resolve conflicts with the VOP1 and
    // VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) break;
    }

    // Reinitialize Bytes, as the DPP64 attempt may have eaten too much.
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction.
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

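    // No 32-bit encoding matched; pull in a second dword and retry against the
    // 64-bit tables.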
    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
  } while (false);

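  // The V_MAC e64 forms leave src2_modifiers unused and unencoded, so the
  // decoded operand list comes up one operand short of what the MCInstrDesc
  // expects; patch in a zero placeholder at the right position.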
  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    int Src2ModIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                                AMDGPU::OpName::src2_modifiers);
    auto I = MI.begin();
    std::advance(I, Src2ModIdx);
    MI.insert(I, MCOperand::createImm(0));
  }

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
    getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(RegId);
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

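// Scalar operands are encoded in units of 32-bit registers, while the register
// classes below enumerate aligned tuples, so the wider classes divide the
// encoded value by the tuple alignment (1 << shift).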
inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI - 102
  // Valery: here we accept as much as we can and let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SReg_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The table-gen'erated disassembler doesn't care about operand types, only
  // about register classes, so an SSrc_32 operand turns into SReg_32; accept
  // immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_256RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return createSRegOperand(AMDGPU::SReg_512RegClassID, Val);
}


MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are assumed to be unsigned integers.
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

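// Inline integer operands: encodings up to INLINE_INTEGER_C_POSITIVE_MAX
// decode to the non-negative constants 0..64, and the remaining encodings
// decode to the small negative constants -1..-16.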
MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;
  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
  // Cast prevents negative overflow.
}

static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

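// The 16-bit variants are IEEE half-precision bit patterns for the same set of
// constants (+/-0.5, +/-1.0, +/-2.0, +/-4.0 and 1/(2*pi)).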
static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
         && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;
  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  }
}

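// Decode a 9-bit source operand. VGPRs occupy the upper half of the encoding
// space; the lower values cover SGPRs, trap-handler temporaries (TTMPs),
// inline integer and floating-point constants, the literal-constant marker,
// and the special registers handled by decodeSpecialReg32/64.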
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;
  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }
  if (TTMP_MIN <= Val && Val <= TTMP_MAX) {
    return createSRegOperand(getTtmpClassId(Width), Val - TTMP_MIN);
  }

  assert(Width == OPW16 || Width == OPW32 || Width == OPW64);

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR_LO, STI));
  case 103: return createRegOperand(getMCReg(FLAT_SCR_HI, STI));
  // ToDo: no support for xnack_mask_lo/_hi register
  case 104:
  case 105: break;
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: return createRegOperand(TBA_LO);
  case 109: return createRegOperand(TBA_HI);
  case 110: return createRegOperand(TMA_LO);
  case 111: return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
  // TODO: SRC_POPS_EXITING_WAVE_ID
  // ToDo: no support for vccz register
  case 251: break;
  // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;
  switch (Val) {
  case 102: return createRegOperand(getMCReg(FLAT_SCR, STI));
  case 106: return createRegOperand(VCC);
  case 108: return createRegOperand(TBA);
  case 110: return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWA9Src(const OpWidthTy Width,
                                             unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (SDWA9EncValues::SRC_VGPR_MIN <= Val &&
      Val <= SDWA9EncValues::SRC_VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width),
                            Val - SDWA9EncValues::SRC_VGPR_MIN);
  }
  if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
      Val <= SDWA9EncValues::SRC_SGPR_MAX) {
    return createSRegOperand(getSgprClassId(Width),
                             Val - SDWA9EncValues::SRC_SGPR_MIN);
  }

  return decodeSpecialReg32(Val - SDWA9EncValues::SRC_SGPR_MIN);
}

MCOperand AMDGPUDisassembler::decodeSDWA9Src16(unsigned Val) const {
  return decodeSDWA9Src(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWA9Src32(unsigned Val) const {
  return decodeSDWA9Src(OPW32, Val);
}

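// The SDWA9 VOPC destination field selects VCC unless its VOPC_DST_VCC_MASK
// bit is set, in which case the low bits hold an explicit SGPR pair (or a
// special 64-bit register encoding).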
MCOperand AMDGPUDisassembler::decodeSDWA9VopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;
    if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find the symbol name for the specified label.
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  typedef std::tuple<uint64_t, StringRef, uint8_t> SymbolInfoTy;
  typedef std::vector<SymbolInfoTy> SectionSymbolsTy;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx);
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}