//===- AMDGPUDisassembler.cpp - Disassembler for AMDGPU ISA ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//
//
/// \file
///
/// This file contains the definition of the AMDGPU ISA disassembler.
//
//===----------------------------------------------------------------------===//

// ToDo: What to do with instruction suffixes (v_mov_b32 vs v_mov_b32_e32)?

#include "Disassembler/AMDGPUDisassembler.h"
#include "AMDGPU.h"
#include "AMDGPURegisterInfo.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm-c/Disassembler.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDisassembler/MCDisassembler.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixedLenDisassembler.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <tuple>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "amdgpu-disassembler"

using DecodeStatus = llvm::MCDisassembler::DecodeStatus;

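// Append an operand to the instruction, reporting SoftFail if the decoded
// operand is invalid.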
inline static MCDisassembler::DecodeStatus
addOperand(MCInst &Inst, const MCOperand& Opnd) {
  Inst.addOperand(Opnd);
  return Opnd.isValid() ?
    MCDisassembler::Success :
    MCDisassembler::SoftFail;
}

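// Insert Op at the position of the named operand NameIdx of MI's opcode and
// return that operand index, or -1 if the opcode has no such operand.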
static int insertNamedMCOperand(MCInst &MI, const MCOperand &Op,
                                uint16_t NameIdx) {
  int OpIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(), NameIdx);
  if (OpIdx != -1) {
    auto I = MI.begin();
    std::advance(I, OpIdx);
    MI.insert(I, Op);
  }
  return OpIdx;
}

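// Decode an SOPP branch target: Imm is a signed offset in dwords relative to
// the instruction following the branch. Prefer a symbolic operand when the
// symbolizer knows a matching symbol.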
static DecodeStatus decodeSoppBrTarget(MCInst &Inst, unsigned Imm,
                                       uint64_t Addr, const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);

  APInt SignedOffset(18, Imm * 4, true);
  int64_t Offset = (SignedOffset.sext(64) + 4 + Addr).getSExtValue();

  if (DAsm->tryAddingSymbolicOperand(Inst, Offset, Addr, true, 2, 2))
    return MCDisassembler::Success;
  return addOperand(Inst, MCOperand::createImm(Imm));
}

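// Helper macros generating the static decoder callbacks referenced by the
// TableGen'erated tables; each one forwards to the corresponding
// AMDGPUDisassembler member decoder.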
#define DECODE_OPERAND(StaticDecoderName, DecoderName) \
static DecodeStatus StaticDecoderName(MCInst &Inst, \
                                       unsigned Imm, \
                                       uint64_t /*Addr*/, \
                                       const void *Decoder) { \
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder); \
  return addOperand(Inst, DAsm->DecoderName(Imm)); \
}

#define DECODE_OPERAND_REG(RegClass) \
DECODE_OPERAND(Decode##RegClass##RegisterClass, decodeOperand_##RegClass)

DECODE_OPERAND_REG(VGPR_32)
DECODE_OPERAND_REG(VS_32)
DECODE_OPERAND_REG(VS_64)
DECODE_OPERAND_REG(VS_128)

DECODE_OPERAND_REG(VReg_64)
DECODE_OPERAND_REG(VReg_96)
DECODE_OPERAND_REG(VReg_128)

DECODE_OPERAND_REG(SReg_32)
DECODE_OPERAND_REG(SReg_32_XM0_XEXEC)
DECODE_OPERAND_REG(SReg_32_XEXEC_HI)
DECODE_OPERAND_REG(SReg_64)
DECODE_OPERAND_REG(SReg_64_XEXEC)
DECODE_OPERAND_REG(SReg_128)
DECODE_OPERAND_REG(SReg_256)
DECODE_OPERAND_REG(SReg_512)

static DecodeStatus decodeOperand_VSrc16(MCInst &Inst,
                                         unsigned Imm,
                                         uint64_t Addr,
                                         const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrc16(Imm));
}

static DecodeStatus decodeOperand_VSrcV216(MCInst &Inst,
                                           unsigned Imm,
                                           uint64_t Addr,
                                           const void *Decoder) {
  auto DAsm = static_cast<const AMDGPUDisassembler*>(Decoder);
  return addOperand(Inst, DAsm->decodeOperand_VSrcV216(Imm));
}

#define DECODE_SDWA(DecName) \
DECODE_OPERAND(decodeSDWA##DecName, decodeSDWA##DecName)

DECODE_SDWA(Src32)
DECODE_SDWA(Src16)
DECODE_SDWA(VopcDst)

#include "AMDGPUGenDisassemblerTables.inc"

//===----------------------------------------------------------------------===//
//
//===----------------------------------------------------------------------===//

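// Read a little-endian value of type T from the front of Bytes and advance
// Bytes past the consumed data.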
template <typename T> static inline T eatBytes(ArrayRef<uint8_t>& Bytes) {
  assert(Bytes.size() >= sizeof(T));
  const auto Res = support::endian::read<T, support::endianness::little>(Bytes.data());
  Bytes = Bytes.slice(sizeof(T));
  return Res;
}

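// Try to decode Inst using the given decoder table; on failure the Bytes
// stream is restored so another table or encoding width can be tried.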
DecodeStatus AMDGPUDisassembler::tryDecodeInst(const uint8_t* Table,
                                               MCInst &MI,
                                               uint64_t Inst,
                                               uint64_t Address) const {
  assert(MI.getOpcode() == 0);
  assert(MI.getNumOperands() == 0);
  MCInst TmpInst;
  HasLiteral = false;
  const auto SavedBytes = Bytes;
  if (decodeInstruction(Table, TmpInst, Inst, Address, this, STI)) {
    MI = TmpInst;
    return MCDisassembler::Success;
  }
  Bytes = SavedBytes;
  return MCDisassembler::Fail;
}

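// Main decoder entry point: try the 64-bit DPP/SDWA tables first, then the
// 32-bit and 64-bit VI/GFX9 tables, and finally post-process instructions
// (V_MAC, MIMG, SDWA) whose MCInst form needs extra operands.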
DecodeStatus AMDGPUDisassembler::getInstruction(MCInst &MI, uint64_t &Size,
                                                ArrayRef<uint8_t> Bytes_,
                                                uint64_t Address,
                                                raw_ostream &WS,
                                                raw_ostream &CS) const {
  CommentStream = &CS;
  bool IsSDWA = false;

  // ToDo: AMDGPUDisassembler supports only VI ISA.
  if (!STI.getFeatureBits()[AMDGPU::FeatureGCN3Encoding])
    report_fatal_error("Disassembly not yet supported for subtarget");

  const unsigned MaxInstBytesNum = (std::min)((size_t)8, Bytes_.size());
  Bytes = Bytes_.slice(0, MaxInstBytesNum);

  DecodeStatus Res = MCDisassembler::Fail;
  do {
    // ToDo: it would be better to select the encoding length from some bit
    // predicate, but such a predicate is not known yet, so try every length.

    // Try to decode DPP and SDWA first to resolve the conflict with VOP1 and
    // VOP2 encodings.
    if (Bytes.size() >= 8) {
      const uint64_t QW = eatBytes<uint64_t>(Bytes);
      Res = tryDecodeInst(DecoderTableDPP64, MI, QW, Address);
      if (Res) break;

      Res = tryDecodeInst(DecoderTableSDWA64, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      Res = tryDecodeInst(DecoderTableSDWA964, MI, QW, Address);
      if (Res) { IsSDWA = true; break; }

      if (STI.getFeatureBits()[AMDGPU::FeatureUnpackedD16VMem]) {
        Res = tryDecodeInst(DecoderTableGFX80_UNPACKED64, MI, QW, Address);
        if (Res) break;
      }
    }

    // Reinitialize Bytes as DPP64 could have eaten too much
    Bytes = Bytes_.slice(0, MaxInstBytesNum);

    // Try to decode a 32-bit instruction
    if (Bytes.size() < 4) break;
    const uint32_t DW = eatBytes<uint32_t>(Bytes);
    Res = tryDecodeInst(DecoderTableVI32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU32, MI, DW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX932, MI, DW, Address);
    if (Res) break;

    if (Bytes.size() < 4) break;
    const uint64_t QW = ((uint64_t)eatBytes<uint32_t>(Bytes) << 32) | DW;
    Res = tryDecodeInst(DecoderTableVI64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableAMDGPU64, MI, QW, Address);
    if (Res) break;

    Res = tryDecodeInst(DecoderTableGFX964, MI, QW, Address);
  } while (false);

  if (Res && (MI.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
              MI.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
              MI.getOpcode() == AMDGPU::V_MAC_F16_e64_vi)) {
    // Insert dummy unused src2_modifiers.
    insertNamedMCOperand(MI, MCOperand::createImm(0),
                         AMDGPU::OpName::src2_modifiers);
  }

  if (Res && (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::MIMG)) {
    Res = convertMIMGInst(MI);
  }

  if (Res && IsSDWA)
    Res = convertSDWAInst(MI);

  Size = Res ? (MaxInstBytesNum - Bytes.size()) : 0;
  return Res;
}

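// Post-process a decoded SDWA instruction: add the operands that its encoding
// leaves implicit (clamp for GFX9 VOPC, the VCC sdst or omod operand for VI).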
DecodeStatus AMDGPUDisassembler::convertSDWAInst(MCInst &MI) const {
  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    if (AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst) != -1)
      // VOPC - insert clamp
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::clamp);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    int SDst = AMDGPU::getNamedOperandIdx(MI.getOpcode(), AMDGPU::OpName::sdst);
    if (SDst != -1) {
      // VOPC - insert VCC register as sdst
      insertNamedMCOperand(MI, createRegOperand(AMDGPU::VCC),
                           AMDGPU::OpName::sdst);
    } else {
      // VOP1/2 - insert omod if present in instruction
      insertNamedMCOperand(MI, MCOperand::createImm(0), AMDGPU::OpName::omod);
    }
  }
  return MCDisassembler::Success;
}

// Note that the MIMG format provides no information about the VADDR size.
// Consequently, decoded instructions always show the address as if it were
// one dword, which may not actually be the case.
DecodeStatus AMDGPUDisassembler::convertMIMGInst(MCInst &MI) const {

  if (MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::Gather4) {
    return MCDisassembler::Success;
  }

  int VDstIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                           AMDGPU::OpName::vdst);

  int VDataIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::vdata);

  int DMaskIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                            AMDGPU::OpName::dmask);

  int TFEIdx = AMDGPU::getNamedOperandIdx(MI.getOpcode(),
                                          AMDGPU::OpName::tfe);

  assert(VDataIdx != -1);
  assert(DMaskIdx != -1);
  assert(TFEIdx != -1);

  bool IsAtomic = (VDstIdx != -1);

  unsigned DMask = MI.getOperand(DMaskIdx).getImm() & 0xf;
  if (DMask == 0)
    return MCDisassembler::Success;

  unsigned DstSize = countPopulation(DMask);
  if (DstSize == 1)
    return MCDisassembler::Success;

  bool D16 = MCII->get(MI.getOpcode()).TSFlags & SIInstrFlags::D16;
  if (D16 && AMDGPU::hasPackedD16(STI)) {
    DstSize = (DstSize + 1) / 2;
  }

  // FIXME: Add tfe support
  if (MI.getOperand(TFEIdx).getImm())
    return MCDisassembler::Success;

  int NewOpcode = -1;

  if (IsAtomic) {
    if (DMask == 0x1 || DMask == 0x3 || DMask == 0xF) {
      NewOpcode = AMDGPU::getMaskedMIMGAtomicOp(*MCII, MI.getOpcode(), DstSize);
    }
    if (NewOpcode == -1) return MCDisassembler::Success;
  } else {
    NewOpcode = AMDGPU::getMaskedMIMGOp(*MCII, MI.getOpcode(), DstSize);
    assert(NewOpcode != -1 && "could not find matching mimg channel instruction");
  }

  auto RCID = MCII->get(NewOpcode).OpInfo[VDataIdx].RegClass;

  // Get first subregister of VData
  unsigned Vdata0 = MI.getOperand(VDataIdx).getReg();
  unsigned VdataSub0 = MRI.getSubReg(Vdata0, AMDGPU::sub0);
  Vdata0 = (VdataSub0 != 0)? VdataSub0 : Vdata0;

  // Widen the register to the correct number of enabled channels.
  auto NewVdata = MRI.getMatchingSuperReg(Vdata0, AMDGPU::sub0,
                                          &MRI.getRegClass(RCID));
  if (NewVdata == AMDGPU::NoRegister) {
    // It's possible to encode this such that the low register + enabled
    // components exceeds the register count.
    return MCDisassembler::Success;
  }

  MI.setOpcode(NewOpcode);
  // vaddr will always appear as a single VGPR. This will look different than
  // how it is usually emitted because the number of register components is not
  // in the instruction encoding.
  MI.getOperand(VDataIdx) = MCOperand::createReg(NewVdata);

  if (IsAtomic) {
    // Atomic operations have an additional operand (a copy of data)
    MI.getOperand(VDstIdx) = MCOperand::createReg(NewVdata);
  }

  return MCDisassembler::Success;
}

const char* AMDGPUDisassembler::getRegClassName(unsigned RegClassID) const {
  return getContext().getRegisterInfo()->
        getRegClassName(&AMDGPUMCRegisterClasses[RegClassID]);
}

inline
MCOperand AMDGPUDisassembler::errOperand(unsigned V,
                                         const Twine& ErrMsg) const {
  *CommentStream << "Error: " + ErrMsg;

  // ToDo: add support for error operands to MCInst.h
  // return MCOperand::createError(V);
  return MCOperand();
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned int RegId) const {
  return MCOperand::createReg(AMDGPU::getMCReg(RegId, STI));
}

inline
MCOperand AMDGPUDisassembler::createRegOperand(unsigned RegClassID,
                                               unsigned Val) const {
  const auto& RegCl = AMDGPUMCRegisterClasses[RegClassID];
  if (Val >= RegCl.getNumRegs())
    return errOperand(Val, Twine(getRegClassName(RegClassID)) +
                           ": unknown register " + Twine(Val));
  return createRegOperand(RegCl.getRegister(Val));
}

inline
MCOperand AMDGPUDisassembler::createSRegOperand(unsigned SRegClassID,
                                                unsigned Val) const {
  // ToDo: SI/CI have 104 SGPRs, VI has 102
  // Valery: here we accept as much as we can and let the assembler sort it out
  int shift = 0;
  switch (SRegClassID) {
  case AMDGPU::SGPR_32RegClassID:
  case AMDGPU::TTMP_32RegClassID:
    break;
  case AMDGPU::SGPR_64RegClassID:
  case AMDGPU::TTMP_64RegClassID:
    shift = 1;
    break;
  case AMDGPU::SGPR_128RegClassID:
  case AMDGPU::TTMP_128RegClassID:
  // ToDo: unclear if s[100:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_256RegClassID:
  case AMDGPU::TTMP_256RegClassID:
  // ToDo: unclear if s[96:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  case AMDGPU::SGPR_512RegClassID:
  case AMDGPU::TTMP_512RegClassID:
    shift = 2;
    break;
  // ToDo: unclear if s[88:104] is available on VI. Can we use VCC as SGPR in
  // this bundle?
  default:
    llvm_unreachable("unhandled register class");
  }

  if (Val % (1 << shift)) {
    *CommentStream << "Warning: " << getRegClassName(SRegClassID)
                   << ": scalar reg isn't aligned " << Val;
  }

  return createRegOperand(SRegClassID, Val >> shift);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_32(unsigned Val) const {
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VS_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrc16(unsigned Val) const {
  return decodeSrcOp(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VSrcV216(unsigned Val) const {
  return decodeSrcOp(OPWV216, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VGPR_32(unsigned Val) const {
  // Some instructions have operand restrictions beyond what the encoding
  // allows. Some ordinarily VSrc_32 operands are VGPR_32, so clear the extra
  // high bit.
  Val &= 255;

  return createRegOperand(AMDGPU::VGPR_32RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_64(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_64RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_96(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_96RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_VReg_128(unsigned Val) const {
  return createRegOperand(AMDGPU::VReg_128RegClassID, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32(unsigned Val) const {
  // The TableGen-generated disassembler doesn't care about operand types and
  // leaves only the register class, so an SSrc_32 operand turns into SReg_32,
  // and therefore we accept immediates and literals here as well.
  return decodeSrcOp(OPW32, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XM0_XEXEC(
  unsigned Val) const {
  // SReg_32_XM0 is SReg_32 without M0 or EXEC_LO/EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_32_XEXEC_HI(
  unsigned Val) const {
  // SReg_32_XEXEC_HI is SReg_32 without EXEC_HI
  return decodeOperand_SReg_32(Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_64_XEXEC(unsigned Val) const {
  return decodeSrcOp(OPW64, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_128(unsigned Val) const {
  return decodeSrcOp(OPW128, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_256(unsigned Val) const {
  return decodeDstOp(OPW256, Val);
}

MCOperand AMDGPUDisassembler::decodeOperand_SReg_512(unsigned Val) const {
  return decodeDstOp(OPW512, Val);
}

MCOperand AMDGPUDisassembler::decodeLiteralConstant() const {
  // For now all literal constants are supposed to be unsigned integers
  // ToDo: deal with signed/unsigned 64-bit integer constants
  // ToDo: deal with float/double constants
  if (!HasLiteral) {
    if (Bytes.size() < 4) {
      return errOperand(0, "cannot read literal, inst bytes left " +
                        Twine(Bytes.size()));
    }
    HasLiteral = true;
    Literal = eatBytes<uint32_t>(Bytes);
  }
  return MCOperand::createImm(Literal);
}

MCOperand AMDGPUDisassembler::decodeIntImmed(unsigned Imm) {
  using namespace AMDGPU::EncValues;

  assert(Imm >= INLINE_INTEGER_C_MIN && Imm <= INLINE_INTEGER_C_MAX);
  return MCOperand::createImm((Imm <= INLINE_INTEGER_C_POSITIVE_MAX) ?
    (static_cast<int64_t>(Imm) - INLINE_INTEGER_C_MIN) :
    (INLINE_INTEGER_C_POSITIVE_MAX - static_cast<int64_t>(Imm)));
      // Cast prevents negative overflow.
}

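// Map the inline floating-point constant encodings (240..248) to their
// 32-bit, 64-bit, and 16-bit bit patterns.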
static int64_t getInlineImmVal32(unsigned Imm) {
  switch (Imm) {
  case 240:
    return FloatToBits(0.5f);
  case 241:
    return FloatToBits(-0.5f);
  case 242:
    return FloatToBits(1.0f);
  case 243:
    return FloatToBits(-1.0f);
  case 244:
    return FloatToBits(2.0f);
  case 245:
    return FloatToBits(-2.0f);
  case 246:
    return FloatToBits(4.0f);
  case 247:
    return FloatToBits(-4.0f);
  case 248: // 1 / (2 * PI)
    return 0x3e22f983;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal64(unsigned Imm) {
  switch (Imm) {
  case 240:
    return DoubleToBits(0.5);
  case 241:
    return DoubleToBits(-0.5);
  case 242:
    return DoubleToBits(1.0);
  case 243:
    return DoubleToBits(-1.0);
  case 244:
    return DoubleToBits(2.0);
  case 245:
    return DoubleToBits(-2.0);
  case 246:
    return DoubleToBits(4.0);
  case 247:
    return DoubleToBits(-4.0);
  case 248: // 1 / (2 * PI)
    return 0x3fc45f306dc9c882;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

static int64_t getInlineImmVal16(unsigned Imm) {
  switch (Imm) {
  case 240:
    return 0x3800;
  case 241:
    return 0xB800;
  case 242:
    return 0x3C00;
  case 243:
    return 0xBC00;
  case 244:
    return 0x4000;
  case 245:
    return 0xC000;
  case 246:
    return 0x4400;
  case 247:
    return 0xC400;
  case 248: // 1 / (2 * PI)
    return 0x3118;
  default:
    llvm_unreachable("invalid fp inline imm");
  }
}

MCOperand AMDGPUDisassembler::decodeFPImmed(OpWidthTy Width, unsigned Imm) {
  assert(Imm >= AMDGPU::EncValues::INLINE_FLOATING_C_MIN
      && Imm <= AMDGPU::EncValues::INLINE_FLOATING_C_MAX);

  // ToDo: case 248: 1/(2*PI) - is allowed only on VI
  switch (Width) {
  case OPW32:
    return MCOperand::createImm(getInlineImmVal32(Imm));
  case OPW64:
    return MCOperand::createImm(getInlineImmVal64(Imm));
  case OPW16:
  case OPWV216:
    return MCOperand::createImm(getInlineImmVal16(Imm));
  default:
    llvm_unreachable("implement me");
  }
}

unsigned AMDGPUDisassembler::getVgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return VGPR_32RegClassID;
  case OPW64: return VReg_64RegClassID;
  case OPW128: return VReg_128RegClassID;
  }
}

unsigned AMDGPUDisassembler::getSgprClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return SGPR_32RegClassID;
  case OPW64: return SGPR_64RegClassID;
  case OPW128: return SGPR_128RegClassID;
  case OPW256: return SGPR_256RegClassID;
  case OPW512: return SGPR_512RegClassID;
  }
}

unsigned AMDGPUDisassembler::getTtmpClassId(const OpWidthTy Width) const {
  using namespace AMDGPU;

  assert(OPW_FIRST_ <= Width && Width < OPW_LAST_);
  switch (Width) {
  default: // fall
  case OPW32:
  case OPW16:
  case OPWV216:
    return TTMP_32RegClassID;
  case OPW64: return TTMP_64RegClassID;
  case OPW128: return TTMP_128RegClassID;
  case OPW256: return TTMP_256RegClassID;
  case OPW512: return TTMP_512RegClassID;
  }
}

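// Return the zero-based index of Val within the TTMP register encoding range
// of the current subtarget, or -1 if Val does not encode a TTMP register.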
int AMDGPUDisassembler::getTTmpIdx(unsigned Val) const {
  using namespace AMDGPU::EncValues;

  unsigned TTmpMin = isGFX9() ? TTMP_GFX9_MIN : TTMP_VI_MIN;
  unsigned TTmpMax = isGFX9() ? TTMP_GFX9_MAX : TTMP_VI_MAX;

  return (TTmpMin <= Val && Val <= TTmpMax)? Val - TTmpMin : -1;
}

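// Decode a 9-bit source operand encoding into a VGPR, SGPR, TTMP, inline
// constant, literal-constant marker, or special register operand.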
MCOperand AMDGPUDisassembler::decodeSrcOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 512); // enum9

  if (VGPR_MIN <= Val && Val <= VGPR_MAX) {
    return createRegOperand(getVgprClassId(Width), Val - VGPR_MIN);
  }
  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  if (INLINE_INTEGER_C_MIN <= Val && Val <= INLINE_INTEGER_C_MAX)
    return decodeIntImmed(Val);

  if (INLINE_FLOATING_C_MIN <= Val && Val <= INLINE_FLOATING_C_MAX)
    return decodeFPImmed(Width, Val);

  if (Val == LITERAL_CONST)
    return decodeLiteralConstant();

  switch (Width) {
  case OPW32:
  case OPW16:
  case OPWV216:
    return decodeSpecialReg32(Val);
  case OPW64:
    return decodeSpecialReg64(Val);
  default:
    llvm_unreachable("unexpected immediate type");
  }
}

MCOperand AMDGPUDisassembler::decodeDstOp(const OpWidthTy Width, unsigned Val) const {
  using namespace AMDGPU::EncValues;

  assert(Val < 128);
  assert(Width == OPW256 || Width == OPW512);

  if (Val <= SGPR_MAX) {
    assert(SGPR_MIN == 0); // "SGPR_MIN <= Val" is always true and causes compilation warning.
    return createSRegOperand(getSgprClassId(Width), Val - SGPR_MIN);
  }

  int TTmpIdx = getTTmpIdx(Val);
  if (TTmpIdx >= 0) {
    return createSRegOperand(getTtmpClassId(Width), TTmpIdx);
  }

  llvm_unreachable("unknown dst register");
}

MCOperand AMDGPUDisassembler::decodeSpecialReg32(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR_LO);
  case 103: return createRegOperand(FLAT_SCR_HI);
  case 104: return createRegOperand(XNACK_MASK_LO);
  case 105: return createRegOperand(XNACK_MASK_HI);
  case 106: return createRegOperand(VCC_LO);
  case 107: return createRegOperand(VCC_HI);
  case 108: assert(!isGFX9()); return createRegOperand(TBA_LO);
  case 109: assert(!isGFX9()); return createRegOperand(TBA_HI);
  case 110: assert(!isGFX9()); return createRegOperand(TMA_LO);
  case 111: assert(!isGFX9()); return createRegOperand(TMA_HI);
  case 124: return createRegOperand(M0);
  case 126: return createRegOperand(EXEC_LO);
  case 127: return createRegOperand(EXEC_HI);
  case 235: return createRegOperand(SRC_SHARED_BASE);
  case 236: return createRegOperand(SRC_SHARED_LIMIT);
  case 237: return createRegOperand(SRC_PRIVATE_BASE);
  case 238: return createRegOperand(SRC_PRIVATE_LIMIT);
    // TODO: SRC_POPS_EXITING_WAVE_ID
    // ToDo: no support for vccz register
  case 251: break;
    // ToDo: no support for execz register
  case 252: break;
  case 253: return createRegOperand(SCC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSpecialReg64(unsigned Val) const {
  using namespace AMDGPU;

  switch (Val) {
  case 102: return createRegOperand(FLAT_SCR);
  case 104: return createRegOperand(XNACK_MASK);
  case 106: return createRegOperand(VCC);
  case 108: assert(!isGFX9()); return createRegOperand(TBA);
  case 110: assert(!isGFX9()); return createRegOperand(TMA);
  case 126: return createRegOperand(EXEC);
  default: break;
  }
  return errOperand(Val, "unknown operand encoding " + Twine(Val));
}

MCOperand AMDGPUDisassembler::decodeSDWASrc(const OpWidthTy Width,
                                            const unsigned Val) const {
  using namespace AMDGPU::SDWA;
  using namespace AMDGPU::EncValues;

  if (STI.getFeatureBits()[AMDGPU::FeatureGFX9]) {
    // XXX: static_cast<int> is needed to avoid a "comparison with unsigned
    // is always true" warning.
    if (SDWA9EncValues::SRC_VGPR_MIN <= static_cast<int>(Val) &&
        Val <= SDWA9EncValues::SRC_VGPR_MAX) {
      return createRegOperand(getVgprClassId(Width),
                              Val - SDWA9EncValues::SRC_VGPR_MIN);
    }
    if (SDWA9EncValues::SRC_SGPR_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_SGPR_MAX) {
      return createSRegOperand(getSgprClassId(Width),
                               Val - SDWA9EncValues::SRC_SGPR_MIN);
    }
    if (SDWA9EncValues::SRC_TTMP_MIN <= Val &&
        Val <= SDWA9EncValues::SRC_TTMP_MAX) {
      return createSRegOperand(getTtmpClassId(Width),
                               Val - SDWA9EncValues::SRC_TTMP_MIN);
    }

    const unsigned SVal = Val - SDWA9EncValues::SRC_SGPR_MIN;

    if (INLINE_INTEGER_C_MIN <= SVal && SVal <= INLINE_INTEGER_C_MAX)
      return decodeIntImmed(SVal);

    if (INLINE_FLOATING_C_MIN <= SVal && SVal <= INLINE_FLOATING_C_MAX)
      return decodeFPImmed(Width, SVal);

    return decodeSpecialReg32(SVal);
  } else if (STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands]) {
    return createRegOperand(getVgprClassId(Width), Val);
  }
  llvm_unreachable("unsupported target");
}

MCOperand AMDGPUDisassembler::decodeSDWASrc16(unsigned Val) const {
  return decodeSDWASrc(OPW16, Val);
}

MCOperand AMDGPUDisassembler::decodeSDWASrc32(unsigned Val) const {
  return decodeSDWASrc(OPW32, Val);
}

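// Decode the SDWA VOPC destination field (GFX9 only): either the implicit VCC
// or an explicitly encoded SGPR, TTMP, or special register.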
MCOperand AMDGPUDisassembler::decodeSDWAVopcDst(unsigned Val) const {
  using namespace AMDGPU::SDWA;

  assert(STI.getFeatureBits()[AMDGPU::FeatureGFX9] &&
         "SDWAVopcDst should be present only on GFX9");
  if (Val & SDWA9EncValues::VOPC_DST_VCC_MASK) {
    Val &= SDWA9EncValues::VOPC_DST_SGPR_MASK;

    int TTmpIdx = getTTmpIdx(Val);
    if (TTmpIdx >= 0) {
      return createSRegOperand(getTtmpClassId(OPW64), TTmpIdx);
    } else if (Val > AMDGPU::EncValues::SGPR_MAX) {
      return decodeSpecialReg64(Val);
    } else {
      return createSRegOperand(getSgprClassId(OPW64), Val);
    }
  } else {
    return createRegOperand(AMDGPU::VCC);
  }
}

bool AMDGPUDisassembler::isVI() const {
  return STI.getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
}

bool AMDGPUDisassembler::isGFX9() const {
  return STI.getFeatureBits()[AMDGPU::FeatureGFX9];
}

//===----------------------------------------------------------------------===//
// AMDGPUSymbolizer
//===----------------------------------------------------------------------===//

// Try to find a symbol name for the specified label
bool AMDGPUSymbolizer::tryAddingSymbolicOperand(MCInst &Inst,
                                raw_ostream &/*cStream*/, int64_t Value,
                                uint64_t /*Address*/, bool IsBranch,
                                uint64_t /*Offset*/, uint64_t /*InstSize*/) {
  using SymbolInfoTy = std::tuple<uint64_t, StringRef, uint8_t>;
  using SectionSymbolsTy = std::vector<SymbolInfoTy>;

  if (!IsBranch) {
    return false;
  }

  auto *Symbols = static_cast<SectionSymbolsTy *>(DisInfo);
  auto Result = std::find_if(Symbols->begin(), Symbols->end(),
                             [Value](const SymbolInfoTy& Val) {
                               return std::get<0>(Val) == static_cast<uint64_t>(Value)
                                   && std::get<2>(Val) == ELF::STT_NOTYPE;
                             });
  if (Result != Symbols->end()) {
    auto *Sym = Ctx.getOrCreateSymbol(std::get<1>(*Result));
    const auto *Add = MCSymbolRefExpr::create(Sym, Ctx);
    Inst.addOperand(MCOperand::createExpr(Add));
    return true;
  }
  return false;
}

void AMDGPUSymbolizer::tryAddingPcLoadReferenceComment(raw_ostream &cStream,
                                                       int64_t Value,
                                                       uint64_t Address) {
  llvm_unreachable("unimplemented");
}

//===----------------------------------------------------------------------===//
// Initialization
//===----------------------------------------------------------------------===//

static MCSymbolizer *createAMDGPUSymbolizer(const Triple &/*TT*/,
                              LLVMOpInfoCallback /*GetOpInfo*/,
                              LLVMSymbolLookupCallback /*SymbolLookUp*/,
                              void *DisInfo,
                              MCContext *Ctx,
                              std::unique_ptr<MCRelocationInfo> &&RelInfo) {
  return new AMDGPUSymbolizer(*Ctx, std::move(RelInfo), DisInfo);
}

static MCDisassembler *createAMDGPUDisassembler(const Target &T,
                                                const MCSubtargetInfo &STI,
                                                MCContext &Ctx) {
  return new AMDGPUDisassembler(STI, Ctx, T.createMCInstrInfo());
}

extern "C" void LLVMInitializeAMDGPUDisassembler() {
  TargetRegistry::RegisterMCDisassembler(getTheGCNTarget(),
                                         createAMDGPUDisassembler);
  TargetRegistry::RegisterMCSymbolizer(getTheGCNTarget(),
                                       createAMDGPUSymbolizer);
}