blob: 728f3522e3c06eaf1a1448e571e6ea2dd2784521 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000014#include "Utils/AMDGPUAsmUtils.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000015#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000016#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000018#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000019#include "llvm/ADT/ArrayRef.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000020#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000021#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000022#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000023#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
Zachary Turner264b5d92017-06-07 03:48:56 +000026#include "llvm/BinaryFormat/ELF.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000027#include "llvm/CodeGen/MachineValueType.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000028#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000029#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000032#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000033#include "llvm/MC/MCInstrInfo.h"
34#include "llvm/MC/MCParser/MCAsmLexer.h"
35#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000036#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000037#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000038#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039#include "llvm/MC/MCRegisterInfo.h"
40#include "llvm/MC/MCStreamer.h"
41#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000042#include "llvm/MC/MCSymbol.h"
43#include "llvm/Support/Casting.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000044#include "llvm/Support/ErrorHandling.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000045#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/SMLoc.h"
47#include "llvm/Support/TargetRegistry.h"
Chandler Carruth6bda14b2017-06-06 11:49:48 +000048#include "llvm/Support/raw_ostream.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000049#include <algorithm>
50#include <cassert>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <map>
55#include <memory>
56#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000057
Tom Stellard45bb48e2015-06-13 03:28:10 +000058using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000059using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000060
61namespace {
62
Sam Kolton1eeb11b2016-09-09 14:44:04 +000063class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000064
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000065enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
66
Sam Kolton1eeb11b2016-09-09 14:44:04 +000067//===----------------------------------------------------------------------===//
68// Operand
69//===----------------------------------------------------------------------===//
70
Tom Stellard45bb48e2015-06-13 03:28:10 +000071class AMDGPUOperand : public MCParsedAsmOperand {
72 enum KindTy {
73 Token,
74 Immediate,
75 Register,
76 Expression
77 } Kind;
78
79 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000080 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000081
82public:
Matt Arsenaultf15da6c2017-02-03 20:49:51 +000083 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
Sam Kolton1eeb11b2016-09-09 14:44:04 +000084 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000085
Sam Kolton5f10a132016-05-06 11:31:17 +000086 typedef std::unique_ptr<AMDGPUOperand> Ptr;
87
Sam Kolton945231a2016-06-10 09:57:59 +000088 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000089 bool Abs = false;
90 bool Neg = false;
91 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000092
93 bool hasFPModifiers() const { return Abs || Neg; }
94 bool hasIntModifiers() const { return Sext; }
95 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
96
97 int64_t getFPModifiersOperand() const {
98 int64_t Operand = 0;
99 Operand |= Abs ? SISrcMods::ABS : 0;
100 Operand |= Neg ? SISrcMods::NEG : 0;
101 return Operand;
102 }
103
104 int64_t getIntModifiersOperand() const {
105 int64_t Operand = 0;
106 Operand |= Sext ? SISrcMods::SEXT : 0;
107 return Operand;
108 }
109
110 int64_t getModifiersOperand() const {
111 assert(!(hasFPModifiers() && hasIntModifiers())
112 && "fp and int modifiers should not be used simultaneously");
113 if (hasFPModifiers()) {
114 return getFPModifiersOperand();
115 } else if (hasIntModifiers()) {
116 return getIntModifiersOperand();
117 } else {
118 return 0;
119 }
120 }
121
122 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
123 };
124
Tom Stellard45bb48e2015-06-13 03:28:10 +0000125 enum ImmTy {
126 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000127 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000128 ImmTyOffen,
129 ImmTyIdxen,
130 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000131 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000132 ImmTyOffset0,
133 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000134 ImmTyGLC,
135 ImmTySLC,
136 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000137 ImmTyClampSI,
138 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000139 ImmTyDppCtrl,
140 ImmTyDppRowMask,
141 ImmTyDppBankMask,
142 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000143 ImmTySdwaDstSel,
144 ImmTySdwaSrc0Sel,
145 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000146 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000147 ImmTyDMask,
148 ImmTyUNorm,
149 ImmTyDA,
150 ImmTyR128,
151 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000152 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000153 ImmTyExpCompr,
154 ImmTyExpVM,
David Stuttard70e8bc12017-06-22 16:29:22 +0000155 ImmTyDFMT,
156 ImmTyNFMT,
Artem Tamazovd6468662016-04-25 14:13:51 +0000157 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000158 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000159 ImmTySendMsg,
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000160 ImmTyInterpSlot,
161 ImmTyInterpAttr,
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000162 ImmTyAttrChan,
163 ImmTyOpSel,
164 ImmTyOpSelHi,
165 ImmTyNegLo,
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000166 ImmTyNegHi,
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000167 ImmTySwizzle,
168 ImmTyHigh
Tom Stellard45bb48e2015-06-13 03:28:10 +0000169 };
170
171 struct TokOp {
172 const char *Data;
173 unsigned Length;
174 };
175
176 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000177 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000178 ImmTy Type;
179 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000180 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000181 };
182
183 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000184 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000185 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000186 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000187 };
188
189 union {
190 TokOp Tok;
191 ImmOp Imm;
192 RegOp Reg;
193 const MCExpr *Expr;
194 };
195
Tom Stellard45bb48e2015-06-13 03:28:10 +0000196 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000197 if (Kind == Token)
198 return true;
199
200 if (Kind != Expression || !Expr)
201 return false;
202
203 // When parsing operands, we can't always tell if something was meant to be
204 // a token, like 'gds', or an expression that references a global variable.
205 // In this case, we assume the string is an expression, and if we need to
206 // interpret is a token, then we treat the symbol name as the token.
207 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000208 }
209
210 bool isImm() const override {
211 return Kind == Immediate;
212 }
213
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000214 bool isInlinableImm(MVT type) const;
215 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000216
Tom Stellard45bb48e2015-06-13 03:28:10 +0000217 bool isRegKind() const {
218 return Kind == Register;
219 }
220
221 bool isReg() const override {
Sam Kolton9772eb32017-01-11 11:46:30 +0000222 return isRegKind() && !hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000223 }
224
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000225 bool isRegOrImmWithInputMods(MVT type) const {
226 return isRegKind() || isInlinableImm(type);
227 }
228
Matt Arsenault4bd72362016-12-10 00:39:12 +0000229 bool isRegOrImmWithInt16InputMods() const {
230 return isRegOrImmWithInputMods(MVT::i16);
231 }
232
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000233 bool isRegOrImmWithInt32InputMods() const {
234 return isRegOrImmWithInputMods(MVT::i32);
235 }
236
237 bool isRegOrImmWithInt64InputMods() const {
238 return isRegOrImmWithInputMods(MVT::i64);
239 }
240
Matt Arsenault4bd72362016-12-10 00:39:12 +0000241 bool isRegOrImmWithFP16InputMods() const {
242 return isRegOrImmWithInputMods(MVT::f16);
243 }
244
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000245 bool isRegOrImmWithFP32InputMods() const {
246 return isRegOrImmWithInputMods(MVT::f32);
247 }
248
249 bool isRegOrImmWithFP64InputMods() const {
250 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000251 }
252
Sam Kolton9772eb32017-01-11 11:46:30 +0000253 bool isVReg() const {
254 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
255 isRegClass(AMDGPU::VReg_64RegClassID) ||
256 isRegClass(AMDGPU::VReg_96RegClassID) ||
257 isRegClass(AMDGPU::VReg_128RegClassID) ||
258 isRegClass(AMDGPU::VReg_256RegClassID) ||
259 isRegClass(AMDGPU::VReg_512RegClassID);
260 }
261
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000262 bool isVReg32OrOff() const {
263 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
264 }
265
Sam Kolton549c89d2017-06-21 08:53:38 +0000266 bool isSDWARegKind() const;
267
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000268 bool isImmTy(ImmTy ImmT) const {
269 return isImm() && Imm.Type == ImmT;
270 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000271
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000272 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000273 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000274 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000275
Sam Kolton945231a2016-06-10 09:57:59 +0000276 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
277 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
278 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000279 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
280 bool isDA() const { return isImmTy(ImmTyDA); }
281 bool isR128() const { return isImmTy(ImmTyUNorm); }
282 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000283 bool isOff() const { return isImmTy(ImmTyOff); }
284 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000285 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
286 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000287 bool isOffen() const { return isImmTy(ImmTyOffen); }
288 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
289 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
290 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
291 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
292 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Matt Arsenaultfd023142017-06-12 15:55:58 +0000293
294 bool isOffsetU12() const { return isImmTy(ImmTyOffset) && isUInt<12>(getImm()); }
295 bool isOffsetS13() const { return isImmTy(ImmTyOffset) && isInt<13>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000296 bool isGDS() const { return isImmTy(ImmTyGDS); }
297 bool isGLC() const { return isImmTy(ImmTyGLC); }
298 bool isSLC() const { return isImmTy(ImmTySLC); }
299 bool isTFE() const { return isImmTy(ImmTyTFE); }
David Stuttard70e8bc12017-06-22 16:29:22 +0000300 bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
301 bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
Sam Kolton945231a2016-06-10 09:57:59 +0000302 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
303 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
304 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
305 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
306 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
307 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
308 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000309 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
310 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
311 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000312 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
313 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
314 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
315 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000316 bool isHigh() const { return isImmTy(ImmTyHigh); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000317
Sam Kolton945231a2016-06-10 09:57:59 +0000318 bool isMod() const {
319 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000320 }
321
322 bool isRegOrImm() const {
323 return isReg() || isImm();
324 }
325
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000326 bool isRegClass(unsigned RCID) const;
327
Sam Kolton9772eb32017-01-11 11:46:30 +0000328 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
329 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
330 }
331
Matt Arsenault4bd72362016-12-10 00:39:12 +0000332 bool isSCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000333 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000334 }
335
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000336 bool isSCSrcV2B16() const {
337 return isSCSrcB16();
338 }
339
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000340 bool isSCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000341 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000342 }
343
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000344 bool isSCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000345 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000346 }
347
Matt Arsenault4bd72362016-12-10 00:39:12 +0000348 bool isSCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000349 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000350 }
351
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000352 bool isSCSrcV2F16() const {
353 return isSCSrcF16();
354 }
355
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000356 bool isSCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000357 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000358 }
359
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000360 bool isSCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000361 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000362 }
363
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000364 bool isSSrcB32() const {
365 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
366 }
367
Matt Arsenault4bd72362016-12-10 00:39:12 +0000368 bool isSSrcB16() const {
369 return isSCSrcB16() || isLiteralImm(MVT::i16);
370 }
371
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000372 bool isSSrcV2B16() const {
373 llvm_unreachable("cannot happen");
374 return isSSrcB16();
375 }
376
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000377 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000378 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
379 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000380 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000381 }
382
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000383 bool isSSrcF32() const {
384 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000385 }
386
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000387 bool isSSrcF64() const {
388 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000389 }
390
Matt Arsenault4bd72362016-12-10 00:39:12 +0000391 bool isSSrcF16() const {
392 return isSCSrcB16() || isLiteralImm(MVT::f16);
393 }
394
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000395 bool isSSrcV2F16() const {
396 llvm_unreachable("cannot happen");
397 return isSSrcF16();
398 }
399
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000400 bool isVCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000401 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000402 }
403
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000404 bool isVCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000405 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000406 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000407
Matt Arsenault4bd72362016-12-10 00:39:12 +0000408 bool isVCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000409 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000410 }
411
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000412 bool isVCSrcV2B16() const {
413 return isVCSrcB16();
414 }
415
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000416 bool isVCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000417 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000418 }
419
420 bool isVCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000421 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000422 }
423
Matt Arsenault4bd72362016-12-10 00:39:12 +0000424 bool isVCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000425 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000426 }
427
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000428 bool isVCSrcV2F16() const {
429 return isVCSrcF16();
430 }
431
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000432 bool isVSrcB32() const {
433 return isVCSrcF32() || isLiteralImm(MVT::i32);
434 }
435
436 bool isVSrcB64() const {
437 return isVCSrcF64() || isLiteralImm(MVT::i64);
438 }
439
Matt Arsenault4bd72362016-12-10 00:39:12 +0000440 bool isVSrcB16() const {
441 return isVCSrcF16() || isLiteralImm(MVT::i16);
442 }
443
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000444 bool isVSrcV2B16() const {
445 llvm_unreachable("cannot happen");
446 return isVSrcB16();
447 }
448
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000449 bool isVSrcF32() const {
450 return isVCSrcF32() || isLiteralImm(MVT::f32);
451 }
452
453 bool isVSrcF64() const {
454 return isVCSrcF64() || isLiteralImm(MVT::f64);
455 }
456
Matt Arsenault4bd72362016-12-10 00:39:12 +0000457 bool isVSrcF16() const {
458 return isVCSrcF16() || isLiteralImm(MVT::f16);
459 }
460
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000461 bool isVSrcV2F16() const {
462 llvm_unreachable("cannot happen");
463 return isVSrcF16();
464 }
465
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000466 bool isKImmFP32() const {
467 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000468 }
469
Matt Arsenault4bd72362016-12-10 00:39:12 +0000470 bool isKImmFP16() const {
471 return isLiteralImm(MVT::f16);
472 }
473
Tom Stellard45bb48e2015-06-13 03:28:10 +0000474 bool isMem() const override {
475 return false;
476 }
477
478 bool isExpr() const {
479 return Kind == Expression;
480 }
481
482 bool isSoppBrTarget() const {
483 return isExpr() || isImm();
484 }
485
Sam Kolton945231a2016-06-10 09:57:59 +0000486 bool isSWaitCnt() const;
487 bool isHwreg() const;
488 bool isSendMsg() const;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000489 bool isSwizzle() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000490 bool isSMRDOffset8() const;
491 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000492 bool isSMRDLiteralOffset() const;
493 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000494 bool isGPRIdxMode() const;
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +0000495 bool isS16Imm() const;
496 bool isU16Imm() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000497
Tom Stellard89049702016-06-15 02:54:14 +0000498 StringRef getExpressionAsToken() const {
499 assert(isExpr());
500 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
501 return S->getSymbol().getName();
502 }
503
Sam Kolton945231a2016-06-10 09:57:59 +0000504 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000505 assert(isToken());
506
507 if (Kind == Expression)
508 return getExpressionAsToken();
509
Sam Kolton945231a2016-06-10 09:57:59 +0000510 return StringRef(Tok.Data, Tok.Length);
511 }
512
513 int64_t getImm() const {
514 assert(isImm());
515 return Imm.Val;
516 }
517
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000518 ImmTy getImmTy() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000519 assert(isImm());
520 return Imm.Type;
521 }
522
523 unsigned getReg() const override {
524 return Reg.RegNo;
525 }
526
Tom Stellard45bb48e2015-06-13 03:28:10 +0000527 SMLoc getStartLoc() const override {
528 return StartLoc;
529 }
530
Peter Collingbourne0da86302016-10-10 22:49:37 +0000531 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000532 return EndLoc;
533 }
534
Sam Kolton945231a2016-06-10 09:57:59 +0000535 Modifiers getModifiers() const {
536 assert(isRegKind() || isImmTy(ImmTyNone));
537 return isRegKind() ? Reg.Mods : Imm.Mods;
538 }
539
540 void setModifiers(Modifiers Mods) {
541 assert(isRegKind() || isImmTy(ImmTyNone));
542 if (isRegKind())
543 Reg.Mods = Mods;
544 else
545 Imm.Mods = Mods;
546 }
547
548 bool hasModifiers() const {
549 return getModifiers().hasModifiers();
550 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000551
Sam Kolton945231a2016-06-10 09:57:59 +0000552 bool hasFPModifiers() const {
553 return getModifiers().hasFPModifiers();
554 }
555
556 bool hasIntModifiers() const {
557 return getModifiers().hasIntModifiers();
558 }
559
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000560 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
561
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000562 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000563
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000564 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000565
Matt Arsenault4bd72362016-12-10 00:39:12 +0000566 template <unsigned Bitwidth>
567 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
568
569 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
570 addKImmFPOperands<16>(Inst, N);
571 }
572
573 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
574 addKImmFPOperands<32>(Inst, N);
575 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000576
577 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000578
579 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
580 if (isRegKind())
581 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000582 else if (isExpr())
583 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000584 else
585 addImmOperands(Inst, N);
586 }
587
588 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
589 Modifiers Mods = getModifiers();
590 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
591 if (isRegKind()) {
592 addRegOperands(Inst, N);
593 } else {
594 addImmOperands(Inst, N, false);
595 }
596 }
597
598 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
599 assert(!hasIntModifiers());
600 addRegOrImmWithInputModsOperands(Inst, N);
601 }
602
603 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
604 assert(!hasFPModifiers());
605 addRegOrImmWithInputModsOperands(Inst, N);
606 }
607
Sam Kolton9772eb32017-01-11 11:46:30 +0000608 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
609 Modifiers Mods = getModifiers();
610 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
611 assert(isRegKind());
612 addRegOperands(Inst, N);
613 }
614
615 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
616 assert(!hasIntModifiers());
617 addRegWithInputModsOperands(Inst, N);
618 }
619
620 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
621 assert(!hasFPModifiers());
622 addRegWithInputModsOperands(Inst, N);
623 }
624
Sam Kolton945231a2016-06-10 09:57:59 +0000625 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
626 if (isImm())
627 addImmOperands(Inst, N);
628 else {
629 assert(isExpr());
630 Inst.addOperand(MCOperand::createExpr(Expr));
631 }
632 }
633
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000634 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000635 switch (Type) {
636 case ImmTyNone: OS << "None"; break;
637 case ImmTyGDS: OS << "GDS"; break;
638 case ImmTyOffen: OS << "Offen"; break;
639 case ImmTyIdxen: OS << "Idxen"; break;
640 case ImmTyAddr64: OS << "Addr64"; break;
641 case ImmTyOffset: OS << "Offset"; break;
642 case ImmTyOffset0: OS << "Offset0"; break;
643 case ImmTyOffset1: OS << "Offset1"; break;
644 case ImmTyGLC: OS << "GLC"; break;
645 case ImmTySLC: OS << "SLC"; break;
646 case ImmTyTFE: OS << "TFE"; break;
David Stuttard70e8bc12017-06-22 16:29:22 +0000647 case ImmTyDFMT: OS << "DFMT"; break;
648 case ImmTyNFMT: OS << "NFMT"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000649 case ImmTyClampSI: OS << "ClampSI"; break;
650 case ImmTyOModSI: OS << "OModSI"; break;
651 case ImmTyDppCtrl: OS << "DppCtrl"; break;
652 case ImmTyDppRowMask: OS << "DppRowMask"; break;
653 case ImmTyDppBankMask: OS << "DppBankMask"; break;
654 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000655 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
656 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
657 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000658 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
659 case ImmTyDMask: OS << "DMask"; break;
660 case ImmTyUNorm: OS << "UNorm"; break;
661 case ImmTyDA: OS << "DA"; break;
662 case ImmTyR128: OS << "R128"; break;
663 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000664 case ImmTyOff: OS << "Off"; break;
665 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000666 case ImmTyExpCompr: OS << "ExpCompr"; break;
667 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000668 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000669 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000670 case ImmTyInterpSlot: OS << "InterpSlot"; break;
671 case ImmTyInterpAttr: OS << "InterpAttr"; break;
672 case ImmTyAttrChan: OS << "AttrChan"; break;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000673 case ImmTyOpSel: OS << "OpSel"; break;
674 case ImmTyOpSelHi: OS << "OpSelHi"; break;
675 case ImmTyNegLo: OS << "NegLo"; break;
676 case ImmTyNegHi: OS << "NegHi"; break;
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +0000677 case ImmTySwizzle: OS << "Swizzle"; break;
Dmitry Preobrazhensky50805a02017-08-07 13:14:12 +0000678 case ImmTyHigh: OS << "High"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000679 }
680 }
681
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000682 void print(raw_ostream &OS) const override {
683 switch (Kind) {
684 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000685 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000686 break;
687 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000688 OS << '<' << getImm();
689 if (getImmTy() != ImmTyNone) {
690 OS << " type: "; printImmTy(OS, getImmTy());
691 }
Sam Kolton945231a2016-06-10 09:57:59 +0000692 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000693 break;
694 case Token:
695 OS << '\'' << getToken() << '\'';
696 break;
697 case Expression:
698 OS << "<expr " << *Expr << '>';
699 break;
700 }
701 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000702
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000703 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
704 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000705 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000706 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000707 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000708 Op->Imm.Val = Val;
709 Op->Imm.IsFPImm = IsFPImm;
710 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000711 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000712 Op->StartLoc = Loc;
713 Op->EndLoc = Loc;
714 return Op;
715 }
716
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000717 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
718 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000719 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000720 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000721 Res->Tok.Data = Str.data();
722 Res->Tok.Length = Str.size();
723 Res->StartLoc = Loc;
724 Res->EndLoc = Loc;
725 return Res;
726 }
727
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000728 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
729 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000730 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000731 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000732 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000733 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000734 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000735 Op->Reg.IsForcedVOP3 = ForceVOP3;
736 Op->StartLoc = S;
737 Op->EndLoc = E;
738 return Op;
739 }
740
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000741 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
742 const class MCExpr *Expr, SMLoc S) {
743 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000744 Op->Expr = Expr;
745 Op->StartLoc = S;
746 Op->EndLoc = S;
747 return Op;
748 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000749};
750
Sam Kolton945231a2016-06-10 09:57:59 +0000751raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
752 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
753 return OS;
754}
755
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000756//===----------------------------------------------------------------------===//
757// AsmParser
758//===----------------------------------------------------------------------===//
759
Artem Tamazova01cce82016-12-27 16:00:11 +0000760// Holds info related to the current kernel, e.g. count of SGPRs used.
761// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
762// .amdgpu_hsa_kernel or at EOF.
763class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000764 int SgprIndexUnusedMin = -1;
765 int VgprIndexUnusedMin = -1;
766 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000767
768 void usesSgprAt(int i) {
769 if (i >= SgprIndexUnusedMin) {
770 SgprIndexUnusedMin = ++i;
771 if (Ctx) {
772 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
773 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
774 }
775 }
776 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000777
Artem Tamazova01cce82016-12-27 16:00:11 +0000778 void usesVgprAt(int i) {
779 if (i >= VgprIndexUnusedMin) {
780 VgprIndexUnusedMin = ++i;
781 if (Ctx) {
782 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
783 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
784 }
785 }
786 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000787
Artem Tamazova01cce82016-12-27 16:00:11 +0000788public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000789 KernelScopeInfo() = default;
790
Artem Tamazova01cce82016-12-27 16:00:11 +0000791 void initialize(MCContext &Context) {
792 Ctx = &Context;
793 usesSgprAt(SgprIndexUnusedMin = -1);
794 usesVgprAt(VgprIndexUnusedMin = -1);
795 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000796
Artem Tamazova01cce82016-12-27 16:00:11 +0000797 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
798 switch (RegKind) {
799 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
800 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
801 default: break;
802 }
803 }
804};
805
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // Encoding the user forced via mnemonic suffix (_e32/_e64 etc.); 0 = none.
  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  // Tracks SGPR/VGPR usage of the kernel currently being assembled.
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  // Assembler-directive helpers (.hsa_code_object_*, .amd_kernel_code_t, ...).
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseDirectiveCodeObjectMetadata();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  // Register-list/register parsing helpers.
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  // Shared converters behind the public cvtMubuf*/cvtDS* entry points.
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  // Maps an optional-immediate kind to its operand index within an MCInst.
  typedef std::map<AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // Expose the ISA version to assembly code as .option.* symbols.
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaInfo::IsaVersion ISA =
          AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym =
          Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
    }
    KernelScope.initialize(getContext());
  }

  // Subtarget generation predicates.
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  // Subtarget feature queries.
  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  // Forced-encoding state, set from the mnemonic suffix before matching.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  // MCTargetAsmParser interface and top-level parse/match entry points.
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  // Generic "prefix:value" operand parsers used by many instruction formats.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  // Immediate/register operand parsers (optionally with abs/neg/sext mods).
  bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
  OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);

  // DS/EXP instruction converters.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  // Id plus a flag recording whether it was written symbolically or numerically.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  // Post-match semantic checks on the encoded MCInst.
  bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateConstantBusLimitations(const MCInst &Inst);
  bool validateEarlyClobberLimitations(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  // Low-level token-stream helpers.
  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  bool parseExpr(int64_t &Imm);

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // swizzle() operand parsing (ds_swizzle_b32 and friends).
  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);

  // MUBUF/MTBUF converters (thin wrappers over cvtMubufImpl).
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);

  // Factories for optional operands that were omitted in the source text.
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  // VOP3/VOP3P converters.
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);

  // MIMG converters.
  void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
               bool IsAtomic = false);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  // DPP support.
  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  // SDWA support.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType, bool skipVcc = false);
};
1092
// Table entry describing one optional instruction operand.
// NOTE(review): field semantics inferred from names/defaults visible in the
// parser declarations above — confirm against parseOptionalOperand's table.
struct OptionalOperand {
  const char *Name;                 // Prefix/operand name as written in asm.
  AMDGPUOperand::ImmTy Type;        // Immediate kind produced on a match.
  bool IsBit;                       // Presumably a single named-bit flag.
  bool (*ConvertResult)(int64_t&);  // Optional value fixup (nullptr elsewhere).
};
1099
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001100} // end anonymous namespace
1101
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001102// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001103static const fltSemantics *getFltSemantics(unsigned Size) {
1104 switch (Size) {
1105 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001106 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001107 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001108 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001109 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001110 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001111 default:
1112 llvm_unreachable("unsupported fp type");
1113 }
1114}
1115
Matt Arsenault4bd72362016-12-10 00:39:12 +00001116static const fltSemantics *getFltSemantics(MVT VT) {
1117 return getFltSemantics(VT.getSizeInBits() / 8);
1118}
1119
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001120static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1121 switch (OperandType) {
1122 case AMDGPU::OPERAND_REG_IMM_INT32:
1123 case AMDGPU::OPERAND_REG_IMM_FP32:
1124 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1125 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1126 return &APFloat::IEEEsingle();
1127 case AMDGPU::OPERAND_REG_IMM_INT64:
1128 case AMDGPU::OPERAND_REG_IMM_FP64:
1129 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1130 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1131 return &APFloat::IEEEdouble();
1132 case AMDGPU::OPERAND_REG_IMM_INT16:
1133 case AMDGPU::OPERAND_REG_IMM_FP16:
1134 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1135 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1136 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1137 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1138 return &APFloat::IEEEhalf();
1139 default:
1140 llvm_unreachable("unsupported fp type");
1141 }
1142}
1143
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001144//===----------------------------------------------------------------------===//
1145// Operand
1146//===----------------------------------------------------------------------===//
1147
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001148static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1149 bool Lost;
1150
1151 // Convert literal to single precision
1152 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1153 APFloat::rmNearestTiesToEven,
1154 &Lost);
1155 // We allow precision lost but not overflow or underflow
1156 if (Status != APFloat::opOK &&
1157 Lost &&
1158 ((Status & APFloat::opOverflow) != 0 ||
1159 (Status & APFloat::opUnderflow) != 0)) {
1160 return false;
1161 }
1162
1163 return true;
1164}
1165
/// Return true if this immediate operand can be encoded as an inline
/// constant for an operand of machine value type \p type (so no extra
/// literal dword is needed in the instruction stream).
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      // The raw 64-bit pattern is checked directly; no conversion needed.
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operand: the double literal must survive conversion to the
    // operand's semantics. Note canLosslesslyConvertToFPType converts
    // FPLiteral in place, so the bitcasts below see the narrowed value.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // 16-bit operand: test the sign-extended low 16 bits of the literal.
  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // Otherwise test the low 32 bits as a 32-bit inline constant.
  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1215
/// Return true if this immediate can be encoded as an extra literal dword
/// for an operand of machine value type \p type.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    if (type == MVT::f64 && hasFPModifiers()) {
      // Cannot apply fp modifiers to int literals preserving the same semantics
      // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
      // disable these cases.
      return false;
    }

    // Literal slots hold 32 bits; a 64-bit operand still encodes a 32-bit
    // literal, so range-check against 32 bits in that case.
    unsigned Size = type.getSizeInBits();
    if (Size == 64)
      Size = 32;

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // The low 32 bits of the value would be zeroed on encoding, but such
    // literals are still accepted here.
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower fp operand: accept if the double literal converts to the
  // operand's semantics without overflow/underflow.
  APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
1256
1257bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001258 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001259}
1260
Sam Kolton549c89d2017-06-21 08:53:38 +00001261bool AMDGPUOperand::isSDWARegKind() const {
1262 if (AsmParser->isVI())
1263 return isVReg();
1264 else if (AsmParser->isGFX9())
1265 return isRegKind();
1266 else
1267 return false;
1268}
1269
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001270uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1271{
1272 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1273 assert(Size == 2 || Size == 4 || Size == 8);
1274
1275 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1276
1277 if (Imm.Mods.Abs) {
1278 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001279 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001280 if (Imm.Mods.Neg) {
1281 Val ^= FpSignMask;
1282 }
1283
1284 return Val;
1285}
1286
1287void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001288
Matt Arsenault4bd72362016-12-10 00:39:12 +00001289 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1290 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001291 addLiteralImmOperand(Inst, Imm.Val,
1292 ApplyModifiers &
1293 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001294 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001295 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1296 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001297 }
1298}
1299
// Encode this operand's literal value (Imm.Val, passed in as Val) into Inst.
// FP tokens are rounded/bit-packed to the destination operand type; integer
// tokens are sign-extended only when inlinable, otherwise truncated.
// When ApplyModifiers is set, input FP modifiers (neg/abs) are folded into
// the literal bits before encoding.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    // Modifiers can only be folded into FP source operands.
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    // FP tokens are always stored as 64-bit doubles; integer tokens use the
    // operand's natural size.
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
      // Inline constants (including 1/(2*pi) where supported) are emitted
      // with their full 64-bit pattern.
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        // (the hardware extends a 32-bit literal with zeros in the low half,
        // so only doubles with a zero low word encode exactly).
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Emit only the high 32 bits of the double.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");
    }
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      // (or half precision, per the operand type's fp semantics).
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      // Packed v2f16/v2i16 operands replicate the 16-bit pattern into both
      // halves of the dword.
      if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
          OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
        ImmVal |= (ImmVal << 16);
      }

      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  // FIXME: No errors on truncation
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32: {
    if (isInt<32>(Val) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: truncate to 32 bits.
    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;
  }
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: only the low 32 bits can be encoded as a literal.
    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;
  }
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
    if (isInt<16>(Val) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    // Not inlinable: truncate to 16 bits.
    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    // Packed operands must hold an inlinable 16-bit value (earlier predicate
    // checks should guarantee this); replicate it into both halves.
    auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
    assert(AMDGPU::isInlinableLiteral16(LiteralVal,
                                        AsmParser->hasInv2PiInlineImm()));

    uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
                      static_cast<uint32_t>(LiteralVal);
    Inst.addOperand(MCOperand::createImm(ImmVal));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1438
Matt Arsenault4bd72362016-12-10 00:39:12 +00001439template <unsigned Bitwidth>
1440void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001441 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001442
1443 if (!Imm.IsFPImm) {
1444 // We got int literal token.
1445 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1446 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001447 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001448
1449 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001450 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001451 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1452 APFloat::rmNearestTiesToEven, &Lost);
1453 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001454}
1455
1456void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1457 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1458}
1459
1460//===----------------------------------------------------------------------===//
1461// AsmParser
1462//===----------------------------------------------------------------------===//
1463
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001464static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1465 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001466 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001467 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001468 case 1: return AMDGPU::VGPR_32RegClassID;
1469 case 2: return AMDGPU::VReg_64RegClassID;
1470 case 3: return AMDGPU::VReg_96RegClassID;
1471 case 4: return AMDGPU::VReg_128RegClassID;
1472 case 8: return AMDGPU::VReg_256RegClassID;
1473 case 16: return AMDGPU::VReg_512RegClassID;
1474 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001475 } else if (Is == IS_TTMP) {
1476 switch (RegWidth) {
1477 default: return -1;
1478 case 1: return AMDGPU::TTMP_32RegClassID;
1479 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001480 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001481 }
1482 } else if (Is == IS_SGPR) {
1483 switch (RegWidth) {
1484 default: return -1;
1485 case 1: return AMDGPU::SGPR_32RegClassID;
1486 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001487 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001488 case 8: return AMDGPU::SReg_256RegClassID;
1489 case 16: return AMDGPU::SReg_512RegClassID;
1490 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001491 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001492 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001493}
1494
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001495static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001496 return StringSwitch<unsigned>(RegName)
1497 .Case("exec", AMDGPU::EXEC)
1498 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001499 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001500 .Case("m0", AMDGPU::M0)
1501 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001502 .Case("tba", AMDGPU::TBA)
1503 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001504 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1505 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001506 .Case("vcc_lo", AMDGPU::VCC_LO)
1507 .Case("vcc_hi", AMDGPU::VCC_HI)
1508 .Case("exec_lo", AMDGPU::EXEC_LO)
1509 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001510 .Case("tma_lo", AMDGPU::TMA_LO)
1511 .Case("tma_hi", AMDGPU::TMA_HI)
1512 .Case("tba_lo", AMDGPU::TBA_LO)
1513 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001514 .Default(0);
1515}
1516
Eugene Zelenko66203762017-01-21 00:53:49 +00001517bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1518 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001519 auto R = parseRegister();
1520 if (!R) return true;
1521 assert(R->isReg());
1522 RegNo = R->getReg();
1523 StartLoc = R->getStartLoc();
1524 EndLoc = R->getEndLoc();
1525 return false;
1526}
1527
Eugene Zelenko66203762017-01-21 00:53:49 +00001528bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1529 RegisterKind RegKind, unsigned Reg1,
1530 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001531 switch (RegKind) {
1532 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001533 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1534 Reg = AMDGPU::EXEC;
1535 RegWidth = 2;
1536 return true;
1537 }
1538 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1539 Reg = AMDGPU::FLAT_SCR;
1540 RegWidth = 2;
1541 return true;
1542 }
1543 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1544 Reg = AMDGPU::VCC;
1545 RegWidth = 2;
1546 return true;
1547 }
1548 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1549 Reg = AMDGPU::TBA;
1550 RegWidth = 2;
1551 return true;
1552 }
1553 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1554 Reg = AMDGPU::TMA;
1555 RegWidth = 2;
1556 return true;
1557 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001558 return false;
1559 case IS_VGPR:
1560 case IS_SGPR:
1561 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001562 if (Reg1 != Reg + RegWidth) {
1563 return false;
1564 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001565 RegWidth++;
1566 return true;
1567 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001568 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001569 }
1570}
1571
// Parse one register reference in any of the accepted syntaxes:
//   * a special register name (exec, vcc, m0, ...),
//   * a single register (v0, s5, ttmp3),
//   * a range (v[0:3], s[2], ttmp[4:5]),
//   * a bracketed list of consecutive single registers ([s0,s1,s2,s3]).
// On success fills RegKind/Reg/RegNum/RegWidth (and, if non-null,
// *DwordRegIndex with the unscaled first-dword index) and returns true.
// Returns false on any syntax or constraint violation.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          unsigned *DwordRegIndex) {
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by name prefix: v..., s..., or ttmp...
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either a closing ']' (single-element range) or ':' must follow.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          // v[XX] is shorthand for v[XX:XX].
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    // Parse the first element, then recursively parse and append the rest.
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        // Each element must be a single register of the same kind and
        // consecutive with the span built so far.
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Post-processing: resolve the physical register and validate alignment.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    // Register classes index tuples, so scale the dword index down.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Finally, reject registers the current subtarget does not have.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1700
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001701std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001702 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001703 SMLoc StartLoc = Tok.getLoc();
1704 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001705 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001706 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001707
Artem Tamazova01cce82016-12-27 16:00:11 +00001708 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001709 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001710 }
Artem Tamazova01cce82016-12-27 16:00:11 +00001711 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001712 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001713}
1714
// Parse an absolute integer expression into Val; returns true on failure.
// When AbsMod is set and the current token is a bare numeric literal directly
// followed by '|', only the primary expression is parsed so the trailing '|'
// is left for the caller to consume as the abs-modifier terminator.
bool
AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
  if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
      (getLexer().getKind() == AsmToken::Integer ||
       getLexer().getKind() == AsmToken::Real)) {

    // This is a workaround for handling operands like these:
    //     |1.0|
    //     |-1|
    // This syntax is not compatible with syntax of standard
    // MC expressions (due to the trailing '|').

    SMLoc EndLoc;
    const MCExpr *Expr;

    if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
      return true;
    }

    // Success only if the expression folds to a compile-time constant.
    return !Expr->evaluateAsAbsolute(Val);
  }

  return getParser().parseAbsoluteExpression(Val);
}
1739
// Parse an immediate operand (integer or fp token, optionally preceded by a
// unary '-') and push it onto Operands. AbsMod is forwarded to
// parseAbsoluteExpr to handle |literal| syntax. Returns NoMatch when the
// current token cannot start an immediate (unless '-' was already consumed,
// which forces ParseFail).
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
  // TODO: add syntactic sugar for 1/(2*PI)
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, AbsMod))
      return MatchOperand_ParseFail;
    // Apply the leading '-' as integer negation.
    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    int64_t IntVal;
    if (parseAbsoluteExpr(IntVal, AbsMod))
      return MatchOperand_ParseFail;

    // FP tokens arrive as the raw bit pattern of a double; negate via the
    // sign bit so -0.0 and the full exponent range behave correctly.
    APFloat F(BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    // A consumed '-' with no following literal is a hard parse error.
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}
1777
Alex Bradbury58eba092016-11-01 16:32:05 +00001778OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00001779AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001780 if (auto R = parseRegister()) {
1781 assert(R->isReg());
1782 R->Reg.IsForcedVOP3 = isForcedVOP3();
1783 Operands.push_back(std::move(R));
1784 return MatchOperand_Success;
1785 }
Sam Kolton9772eb32017-01-11 11:46:30 +00001786 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001787}
1788
Alex Bradbury58eba092016-11-01 16:32:05 +00001789OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001790AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1791 auto res = parseImm(Operands, AbsMod);
Sam Kolton9772eb32017-01-11 11:46:30 +00001792 if (res != MatchOperand_NoMatch) {
1793 return res;
1794 }
1795
1796 return parseReg(Operands);
1797}
1798
// Parse a register or immediate carrying optional FP input modifiers in any
// of the accepted spellings:
//   -x, neg(x)  -> Mods.Neg
//   |x|, abs(x) -> Mods.Abs
// The prefix is consumed first, then the operand itself, then the matching
// closing token ('|' or ')'). AllowImm selects between parseRegOrImm and
// parseReg for the inner operand.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();

    // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
    if (NextToken.is(AsmToken::Minus)) {
      Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
      return MatchOperand_ParseFail;
    }

    // '-' followed by an integer literal N should be interpreted as integer
    // negation rather than a floating-point NEG modifier applied to N.
    // Beside being counter-intuitive, such use of floating-point NEG modifier
    // results in different meaning of integer literals used with VOP1/2/C
    // and VOP3, for example:
    //    v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
    //    v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
    // Negative fp literals should be handled likewise for uniformity
    if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
      Parser.Lex();
      Negate = true;
    }
  }

  // 'neg(' prefix form. Mutually exclusive with a leading '-'.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // 'abs(' prefix form.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // '|x|' form. Mutually exclusive with 'abs('.
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  // Parse the operand proper. Abs is passed so |literal| can be parsed
  // despite the trailing '|' (see parseAbsoluteExpr).
  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Consume closing tokens and record the modifiers.
  AMDGPUOperand::Modifiers Mods;
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  // Attach the collected modifiers to the operand just parsed.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1907
Alex Bradbury58eba092016-11-01 16:32:05 +00001908OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00001909AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1910 bool AllowImm) {
Sam Kolton945231a2016-06-10 09:57:59 +00001911 bool Sext = false;
1912
Eugene Zelenko66203762017-01-21 00:53:49 +00001913 if (getLexer().getKind() == AsmToken::Identifier &&
1914 Parser.getTok().getString() == "sext") {
Sam Kolton945231a2016-06-10 09:57:59 +00001915 Parser.Lex();
1916 Sext = true;
1917 if (getLexer().isNot(AsmToken::LParen)) {
1918 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1919 return MatchOperand_ParseFail;
1920 }
1921 Parser.Lex();
1922 }
1923
Sam Kolton9772eb32017-01-11 11:46:30 +00001924 OperandMatchResultTy Res;
1925 if (AllowImm) {
1926 Res = parseRegOrImm(Operands);
1927 } else {
1928 Res = parseReg(Operands);
1929 }
Sam Kolton945231a2016-06-10 09:57:59 +00001930 if (Res != MatchOperand_Success) {
1931 return Res;
1932 }
1933
Matt Arsenaultb55f6202016-12-03 18:22:49 +00001934 AMDGPUOperand::Modifiers Mods;
Sam Kolton945231a2016-06-10 09:57:59 +00001935 if (Sext) {
1936 if (getLexer().isNot(AsmToken::RParen)) {
1937 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1938 return MatchOperand_ParseFail;
1939 }
1940 Parser.Lex();
1941 Mods.Sext = true;
1942 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00001943
Sam Kolton945231a2016-06-10 09:57:59 +00001944 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00001945 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00001946 Op.setModifiers(Mods);
1947 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001948
Sam Kolton945231a2016-06-10 09:57:59 +00001949 return MatchOperand_Success;
1950}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001951
// Parse a register (no immediates) that may carry FP input modifiers
// (neg/abs in any spelling).
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, /*AllowImm=*/false);
}
1956
// Parse a register (no immediates) that may carry the integer input
// modifier 'sext(...)'.
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, /*AllowImm=*/false);
}
1961
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001962OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1963 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1964 if (Reg) {
1965 Operands.push_back(std::move(Reg));
1966 return MatchOperand_Success;
1967 }
1968
1969 const AsmToken &Tok = Parser.getTok();
1970 if (Tok.getString() == "off") {
1971 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1972 AMDGPUOperand::ImmTyOff, false));
1973 Parser.Lex();
1974 return MatchOperand_Success;
1975 }
1976
1977 return MatchOperand_NoMatch;
1978}
1979
// Reject matched instructions whose encoding conflicts with a user-forced
// encoding suffix, and enforce a few target-specific operand constraints
// that the generated matcher cannot express.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // The forced encoding (_e32/_e64/_dpp/_sdwa suffix) must agree with the
  // encoding of the matched opcode.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Some VOP3 opcodes prefer their 32-bit form unless _e64 was explicit.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  // Targets without flat-instruction offsets must have offset == 0.
  if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
    // FIXME: Produces error without correct column reported.
    // NOTE(review): unlike the SDWA check above, Op.getImm() is called here
    // without an Op.isImm() guard — confirm the offset operand of a matched
    // FLAT instruction is always an immediate at this point.
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
    const auto &Op = Inst.getOperand(OpNum);
    if (Op.getImm() != 0)
      return Match_InvalidOperand;
  }

  return Match_Success;
}
2016
Matt Arsenault5f45e782017-01-09 18:44:11 +00002017// What asm variants we should check
2018ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2019 if (getForcedEncodingSize() == 32) {
2020 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2021 return makeArrayRef(Variants);
2022 }
2023
2024 if (isForcedVOP3()) {
2025 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2026 return makeArrayRef(Variants);
2027 }
2028
2029 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002030 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2031 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002032 return makeArrayRef(Variants);
2033 }
2034
2035 if (isForcedDPP()) {
2036 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2037 return makeArrayRef(Variants);
2038 }
2039
2040 static const unsigned Variants[] = {
2041 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002042 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002043 };
2044
2045 return makeArrayRef(Variants);
2046}
2047
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002048unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2049 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2050 const unsigned Num = Desc.getNumImplicitUses();
2051 for (unsigned i = 0; i < Num; ++i) {
2052 unsigned Reg = Desc.ImplicitUses[i];
2053 switch (Reg) {
2054 case AMDGPU::FLAT_SCR:
2055 case AMDGPU::VCC:
2056 case AMDGPU::M0:
2057 return Reg;
2058 default:
2059 break;
2060 }
2061 }
2062 return AMDGPU::NoRegister;
2063}
2064
// NB: This code is correct only when used to check constant
// bus limitations because GFX7 support no f16 inline constants.
// Note that there are no cases when a GFX7 opcode violates
// constant bus limitations due to the use of an f16 constant.
//
// Returns true if the immediate operand at OpIdx can be encoded as an
// inline constant for its expected operand size.
bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
                                       unsigned OpIdx) const {
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());

  // Only SI source operands can hold inline constants at all.
  if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
    return false;
  }

  const MCOperand &MO = Inst.getOperand(OpIdx);

  int64_t Val = MO.getImm();
  auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);

  switch (OpSize) { // expected operand size in bytes
  case 8:
    return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
  case 4:
    return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
  case 2: {
    // 16-bit operands: packed v2i16/v2f16 operands follow different
    // inlinable-literal rules than scalar 16-bit operands.
    const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
    if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
        OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
      return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
    } else {
      return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
    }
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
2100
2101bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2102 const MCOperand &MO = Inst.getOperand(OpIdx);
2103 if (MO.isImm()) {
2104 return !isInlineConstant(Inst, OpIdx);
2105 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002106 return !MO.isReg() ||
2107 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002108}
2109
// Enforce the hardware restriction that a VALU instruction may read at
// most one value over the constant bus (SGPR, literal/expression, or an
// implicitly-read special register).
bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VALU-style encodings are subject to the limit.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
       SIInstrFlags::SDWA)) {

    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // An implicit read of FLAT_SCR/VCC/M0 also occupies the bus.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break; // src operands are contiguous; stop at first gap

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Pairs of registers with a partial intersections like these
          // s0, s[0:1]
          // flat_scratch_lo, flat_scratch
          // flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          //
          // Re-reading the same SGPR is free; only a new register counts.
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2163
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002164bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
2165
2166 const unsigned Opcode = Inst.getOpcode();
2167 const MCInstrDesc &Desc = MII.get(Opcode);
2168
2169 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2170 if (DstIdx == -1 ||
2171 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2172 return true;
2173 }
2174
2175 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2176
2177 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2178 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2179 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2180
2181 assert(DstIdx != -1);
2182 const MCOperand &Dst = Inst.getOperand(DstIdx);
2183 assert(Dst.isReg());
2184 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2185
2186 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2187
2188 for (int SrcIdx : SrcIndices) {
2189 if (SrcIdx == -1) break;
2190 const MCOperand &Src = Inst.getOperand(SrcIdx);
2191 if (Src.isReg()) {
2192 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2193 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2194 return false;
2195 }
2196 }
2197 }
2198
2199 return true;
2200}
2201
2202bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2203 const SMLoc &IDLoc) {
2204 if (!validateConstantBusLimitations(Inst)) {
2205 Error(IDLoc,
2206 "invalid operand (violates constant bus restrictions)");
2207 return false;
2208 }
2209 if (!validateEarlyClobberLimitations(Inst)) {
2210 Error(IDLoc,
2211 "destination must be different than all sources");
2212 return false;
2213 }
2214
2215 return true;
2216}
2217
// Match the parsed operands against each candidate encoding variant,
// keep the most specific failure status across variants, and emit the
// instruction on the first successful match.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // Even a matched instruction may violate target rules (constant bus,
    // early clobber); reject it here rather than emitting bad code.
    if (!validateInstruction(Inst, IDLoc)) {
      return true;
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      // Point the diagnostic at the offending operand when it has a
      // usable location.
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2282
Artem Tamazov25478d82016-12-29 15:41:52 +00002283bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2284 int64_t Tmp = -1;
2285 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2286 return true;
2287 }
2288 if (getParser().parseAbsoluteExpression(Tmp)) {
2289 return true;
2290 }
2291 Ret = static_cast<uint32_t>(Tmp);
2292 return false;
2293}
2294
// Parse "<major> ',' <minor>" for version-style directives.
// Returns true (after emitting a diagnostic) on failure.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
2309
2310bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00002311 uint32_t Major;
2312 uint32_t Minor;
2313
2314 if (ParseDirectiveMajorMinor(Major, Minor))
2315 return true;
2316
2317 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
2318 return false;
2319}
2320
// Handle .hsa_code_object_isa:
//   [<major> ',' <minor> ',' <stepping> ',' <vendor> ',' <arch>]
// With no arguments, the ISA version of the targeted GPU is used.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaInfo::IsaVersion ISA =
        AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  // Vendor and arch names are quoted strings.
  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
2373
// Collect the raw YAML text between the code-object-metadata begin/end
// directives and hand it to the target streamer for validation/emission.
bool AMDGPUAsmParser::ParseDirectiveCodeObjectMetadata() {
  std::string YamlString;
  raw_string_ostream YamlStream(YamlString);

  // Whitespace is significant in YAML, so stop the lexer from eating it.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Preserve leading whitespace of each line verbatim.
    while (getLexer().is(AsmToken::Space)) {
      YamlStream << getLexer().getTok().getString();
      Lex();
    }

    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == AMDGPU::CodeObject::MetadataAssemblerDirectiveEnd) {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Copy the rest of the line into the YAML buffer, re-inserting the
    // statement separator the lexer consumed.
    YamlStream << Parser.parseStringToEndOfStatement()
               << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  // Restore normal whitespace handling before returning.
  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
    return TokError(
        "expected directive .end_amdgpu_code_object_metadata not found");
  }

  YamlStream.flush();

  if (!getTargetStreamer().EmitCodeObjectMetadata(YamlString))
    return Error(getParser().getTok().getLoc(), "invalid code object metadata");

  return false;
}
2416
Tom Stellardff7416b2015-06-26 21:58:31 +00002417bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2418 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002419 SmallString<40> ErrStr;
2420 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00002421 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002422 return TokError(Err.str());
2423 }
Tom Stellardff7416b2015-06-26 21:58:31 +00002424 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00002425 return false;
2426}
2427
// Handle .amd_kernel_code_t ... .end_amd_kernel_code_t: start from the
// default header for the current feature set and override listed fields.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());

  while (true) {
    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
2455
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002456bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2457 if (getLexer().isNot(AsmToken::Identifier))
2458 return TokError("expected symbol name");
2459
2460 StringRef KernelName = Parser.getTok().getString();
2461
2462 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2463 ELF::STT_AMDGPU_HSA_KERNEL);
2464 Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00002465 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002466 return false;
2467}
2468
Tom Stellard45bb48e2015-06-13 03:28:10 +00002469bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00002470 StringRef IDVal = DirectiveID.getString();
2471
2472 if (IDVal == ".hsa_code_object_version")
2473 return ParseDirectiveHSACodeObjectVersion();
2474
2475 if (IDVal == ".hsa_code_object_isa")
2476 return ParseDirectiveHSACodeObjectISA();
2477
Konstantin Zhuravlyov7498cd62017-03-22 22:32:22 +00002478 if (IDVal == AMDGPU::CodeObject::MetadataAssemblerDirectiveBegin)
2479 return ParseDirectiveCodeObjectMetadata();
Sam Kolton69c8aa22016-12-19 11:43:15 +00002480
Tom Stellardff7416b2015-06-26 21:58:31 +00002481 if (IDVal == ".amd_kernel_code_t")
2482 return ParseDirectiveAMDKernelCodeT();
2483
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002484 if (IDVal == ".amdgpu_hsa_kernel")
2485 return ParseDirectiveAMDGPUHsaKernel();
2486
Tom Stellard45bb48e2015-06-13 03:28:10 +00002487 return true;
2488}
2489
Matt Arsenault68802d32015-11-05 03:11:27 +00002490bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
2491 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00002492 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00002493 return true;
2494
Matt Arsenault3b159672015-12-01 20:31:08 +00002495 if (isSI()) {
2496 // No flat_scr
2497 switch (RegNo) {
2498 case AMDGPU::FLAT_SCR:
2499 case AMDGPU::FLAT_SCR_LO:
2500 case AMDGPU::FLAT_SCR_HI:
2501 return false;
2502 default:
2503 return true;
2504 }
2505 }
2506
Matt Arsenault68802d32015-11-05 03:11:27 +00002507 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2508 // SI/CI have.
2509 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2510 R.isValid(); ++R) {
2511 if (*R == RegNo)
2512 return false;
2513 }
2514
2515 return true;
2516}
2517
// Parse one instruction operand: first the tablegen'd custom parsers,
// then register/immediate forms, then a general expression, and finally
// a bare identifier (instruction flags such as 'gds').
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  const auto &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  const MCExpr *Expr = nullptr;
  if (!Parser.parseExpression(Expr)) {
    Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
    return MatchOperand_Success;
  }

  // Possibly this is an instruction flag like 'gds'.
  if (Tok.getKind() == AsmToken::Identifier) {
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
    Parser.Lex();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
2556
Sam Kolton05ef1c92016-06-03 10:27:37 +00002557StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2558 // Clear any forced encodings from the previous instruction.
2559 setForcedEncodingSize(0);
2560 setForcedDPP(false);
2561 setForcedSDWA(false);
2562
2563 if (Name.endswith("_e64")) {
2564 setForcedEncodingSize(64);
2565 return Name.substr(0, Name.size() - 4);
2566 } else if (Name.endswith("_e32")) {
2567 setForcedEncodingSize(32);
2568 return Name.substr(0, Name.size() - 4);
2569 } else if (Name.endswith("_dpp")) {
2570 setForcedDPP(true);
2571 return Name.substr(0, Name.size() - 4);
2572 } else if (Name.endswith("_sdwa")) {
2573 setForcedSDWA(true);
2574 return Name.substr(0, Name.size() - 5);
2575 }
2576 return Name;
2577}
2578
// Top-level per-instruction parse: push the (suffix-stripped) mnemonic,
// then parse operands until end of statement. On an operand error, skip
// the rest of the statement to avoid cascading diagnostics.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
2612
2613//===----------------------------------------------------------------------===//
2614// Utility functions
2615//===----------------------------------------------------------------------===//
2616
// Parse "<Prefix>:<integer>" (the value may carry a leading '-') into
// Int. Returns NoMatch when the next token is not the expected prefix.
OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef Name = Parser.getTok().getString();
    if (!Name.equals(Prefix)) {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();

    // Consume an optional leading minus before requiring an Integer
    // token, then negate the parsed value at the end.
    bool IsMinus = false;
    if (getLexer().getKind() == AsmToken::Minus) {
      Parser.Lex();
      IsMinus = true;
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;

    if (IsMinus)
      Int = -Int;
    break;
  }
  }
  return MatchOperand_Success;
}
2652
Alex Bradbury58eba092016-11-01 16:32:05 +00002653OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002654AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002655 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002656 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002657 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002658 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002659
Alex Bradbury58eba092016-11-01 16:32:05 +00002660 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002661 if (Res != MatchOperand_Success)
2662 return Res;
2663
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002664 if (ConvertResult && !ConvertResult(Value)) {
2665 return MatchOperand_ParseFail;
2666 }
2667
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002668 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002669 return MatchOperand_Success;
2670}
2671
// Parse "<Prefix>:[b,b,...]" where each element is 0 or 1 (at most 4),
// packing element i into bit i of a single immediate operand.
// ConvertResult is accepted for signature symmetry with
// parseIntWithPrefix but is not applied here.
OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
  const char *Prefix,
  OperandVector &Operands,
  AMDGPUOperand::ImmTy ImmTy,
  bool (*ConvertResult)(int64_t&)) {
  StringRef Name = Parser.getTok().getString();
  if (!Name.equals(Prefix))
    return MatchOperand_NoMatch;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LBrac))
    return MatchOperand_ParseFail;
  Parser.Lex();

  unsigned Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  // FIXME: How to verify the number of elements matches the number of src
  // operands?
  for (int I = 0; I < 4; ++I) {
    if (I != 0) {
      if (getLexer().is(AsmToken::RBrac))
        break;

      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    int64_t Op;
    if (getParser().parseAbsoluteExpression(Op))
      return MatchOperand_ParseFail;

    // Each element must be a single bit.
    if (Op != 0 && Op != 1)
      return MatchOperand_ParseFail;
    Val |= (Op << I);
  }

  // NOTE(review): this Lex() consumes the token after the list without
  // verifying it is ']' — a 4-element list not followed by ']' appears
  // to be accepted; confirm whether that is intentional.
  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
  return MatchOperand_Success;
}
2721
// Parse an optional named flag: the identifier "Name" yields 1,
// "no<...>Name" yields 0, and end-of-statement yields the default 0.
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
2753
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00002754static void addOptionalImmOperand(
2755 MCInst& Inst, const OperandVector& Operands,
2756 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
2757 AMDGPUOperand::ImmTy ImmT,
2758 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002759 auto i = OptionalIdx.find(ImmT);
2760 if (i != OptionalIdx.end()) {
2761 unsigned Idx = i->second;
2762 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
2763 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00002764 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002765 }
2766}
2767
Alex Bradbury58eba092016-11-01 16:32:05 +00002768OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002769AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00002770 if (getLexer().isNot(AsmToken::Identifier)) {
2771 return MatchOperand_NoMatch;
2772 }
2773 StringRef Tok = Parser.getTok().getString();
2774 if (Tok != Prefix) {
2775 return MatchOperand_NoMatch;
2776 }
2777
2778 Parser.Lex();
2779 if (getLexer().isNot(AsmToken::Colon)) {
2780 return MatchOperand_ParseFail;
2781 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002782
Sam Kolton3025e7f2016-04-26 13:33:56 +00002783 Parser.Lex();
2784 if (getLexer().isNot(AsmToken::Identifier)) {
2785 return MatchOperand_ParseFail;
2786 }
2787
2788 Value = Parser.getTok().getString();
2789 return MatchOperand_Success;
2790}
2791
Tom Stellard45bb48e2015-06-13 03:28:10 +00002792//===----------------------------------------------------------------------===//
2793// ds
2794//===----------------------------------------------------------------------===//
2795
// Build the MCInst for ds_* instructions that use a split offset
// (offset0/offset1), supplying defaults for omitted optional immediates
// and the implicit m0 operand.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
2819
// Common MCInst conversion for ds_* instructions with a single offset.
// IsGdsHardcoded indicates the opcode implies gds, so no explicit gds
// operand is appended; a literal "gds" token also sets it.
void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                                bool IsGdsHardcoded) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      IsGdsHardcoded = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ds_swizzle_b32 interprets its offset operand as a swizzle pattern.
  AMDGPUOperand::ImmTy OffsetType =
    (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
     Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
                                                      AMDGPUOperand::ImmTyOffset;

  addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);

  if (!IsGdsHardcoded) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
2854
/// Convert parsed exp-instruction operands into MCInst operands and compute
/// the "en" (enable) mask from which sources are present.
///
/// Exactly four source slots are expected; "off" sources become NoRegister
/// placeholders. In compr mode the second register moves into slot 1 and the
/// upper slots are cleared, and the enable mask is built in 2-bit pairs.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // MCInst operand index of each of the four source slots.
  unsigned OperandIdx[4];
  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // "off" sources occupy a slot but carry no register.
    if (Op.isOff()) {
      assert(SrcIdx < 4);
      OperandIdx[SrcIdx] = Inst.size();
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      ++SrcIdx;
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // "done" is encoded in the opcode itself; no MCInst operand.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(SrcIdx == 4);

  bool Compr = false;
  if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
    // Compressed export: sources are packed pairs, so slot 2's register
    // becomes slot 1 and the upper two slots are unused.
    Compr = true;
    Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
    Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
    Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
  }

  // Each live slot enables one bit (or a 2-bit pair in compr mode).
  for (auto i = 0; i < SrcIdx; ++i) {
    if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
      EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002915
2916//===----------------------------------------------------------------------===//
2917// s_waitcnt
2918//===----------------------------------------------------------------------===//
2919
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00002920static bool
2921encodeCnt(
2922 const AMDGPU::IsaInfo::IsaVersion ISA,
2923 int64_t &IntVal,
2924 int64_t CntVal,
2925 bool Saturate,
2926 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
2927 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
2928{
2929 bool Failed = false;
2930
2931 IntVal = encode(ISA, IntVal, CntVal);
2932 if (CntVal != decode(ISA, IntVal)) {
2933 if (Saturate) {
2934 IntVal = encode(ISA, IntVal, -1);
2935 } else {
2936 Failed = true;
2937 }
2938 }
2939 return Failed;
2940}
2941
/// Parse a single "<name>(<value>)" counter specification of an s_waitcnt
/// operand (e.g. "vmcnt(0)") and fold it into the packed immediate \p IntVal.
///
/// A "_sat" suffix on the counter name clamps out-of-range values instead of
/// erroring. A trailing '&' or ',' separator before another counter is
/// consumed here.
///
/// \returns true on error (MatchOperand_ParseFail at the call site).
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  SMLoc ValLoc = Parser.getTok().getLoc();
  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());

  bool Failed = true;
  bool Sat = CntName.endswith("_sat");

  if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
  } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
  } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
    Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
  }

  // NOTE(review): an unrecognized counter name leaves Failed == true, so it
  // is also reported with the "too large value" message below — a dedicated
  // "invalid counter name" diagnostic would be clearer; confirm before changing
  // since diagnostics are covered by lit tests.
  if (Failed) {
    Error(ValLoc, "too large value for " + CntName);
    return true;
  }

  if (getLexer().isNot(AsmToken::RParen)) {
    return true;
  }

  Parser.Lex();
  // Consume an optional '&' or ',' separator, but only when another counter
  // specification (an identifier) follows it.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
    const AsmToken NextToken = getLexer().peekTok();
    if (NextToken.is(AsmToken::Identifier)) {
      Parser.Lex();
    }
  }

  return false;
}
2991
/// Parse the operand of s_waitcnt: either a raw integer immediate or one or
/// more named counter specs (see parseCnt), which are folded into the
/// all-counters-idle bitmask so unspecified counters stay at their maximum.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
  // Start from the "no wait" mask; parseCnt carves individual fields out.
  int64_t Waitcnt = getWaitcntBitMask(ISA);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // Or a sequence of counter specs up to the end of the statement.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
3017
/// Parse a "hwreg(<id> [, <offset>, <width>])" construct for s_getreg/s_setreg.
///
/// The register id may be a symbolic name or an integer. Offset and width are
/// optional together; when absent the caller's defaults are kept. This routine
/// only checks syntax — value range checking is done by the caller.
///
/// \returns true on a syntax error.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: look it up; unknown names yield ID_UNKNOWN_
    // and are diagnosed by the caller.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form hwreg(<id>): offset and width keep the caller's defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
3079
/// Parse the hwreg operand of s_getreg/s_setreg: either a raw 16-bit
/// immediate or a hwreg(...) construct, validated and packed into the
/// id/offset/width bit fields of a 16-bit immediate.
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;

  case AsmToken::Identifier: {
    OperandInfoTy HwReg(ID_UNKNOWN_);
    int64_t Offset = OFFSET_DEFAULT_;
    int64_t Width = WIDTH_M1_DEFAULT_ + 1;
    if (parseHwregConstruct(HwReg, Offset, Width))
      return MatchOperand_ParseFail;
    // Range-check each field; diagnostics do not abort parsing (see above).
    if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
      if (HwReg.IsSymbolic)
        Error(S, "invalid symbolic name of hardware register");
      else
        Error(S, "invalid code of hardware register: only 6-bit values are legal");
    }
    if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
      Error(S, "invalid bit offset: only 5-bit values are legal");
    if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
      Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
    // Pack the fields; width is encoded biased by one (width - 1).
    Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
3122
// Any immediate is acceptable as an s_waitcnt operand.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
3126
// True iff this operand was parsed as a hwreg(...) immediate.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
3130
/// Parse a "sendmsg(<msg> [, <operation> [, <stream id>]])" construct.
///
/// Message and operation may be symbolic names or integers. Only GS, GS_DONE
/// and SYSMSG messages take an operation; only GS/GS_DONE (except NOP) take an
/// optional stream id. This routine checks syntax only — value validation is
/// done by the caller (parseSendMsgOp).
///
/// \returns true on a syntax error.
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name: only the non-gap ids participate in the lookup.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
      default: continue; // Omit gaps.
      case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // A second adjacent integer invalidates the id (e.g. "1 2").
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Messages without an operation must be followed directly by ')'.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    // The operation name table and id range depend on the message type.
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
3226
Matt Arsenault0e8a2992016-12-15 20:40:20 +00003227OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3228 if (getLexer().getKind() != AsmToken::Identifier)
3229 return MatchOperand_NoMatch;
3230
3231 StringRef Str = Parser.getTok().getString();
3232 int Slot = StringSwitch<int>(Str)
3233 .Case("p10", 0)
3234 .Case("p20", 1)
3235 .Case("p0", 2)
3236 .Default(-1);
3237
3238 SMLoc S = Parser.getTok().getLoc();
3239 if (Slot == -1)
3240 return MatchOperand_ParseFail;
3241
3242 Parser.Lex();
3243 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3244 AMDGPUOperand::ImmTyInterpSlot));
3245 return MatchOperand_Success;
3246}
3247
/// Parse an interpolation attribute of the form "attr<N>.<chan>", where
/// <chan> is one of x, y, z, w. On success pushes two immediate operands:
/// the attribute index (0..63) and the channel (0..3).
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The channel is the two-character ".<c>" suffix.
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip the ".<c>" suffix and the "attr" prefix, leaving only the number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  // Out-of-range indices are diagnosed but still count as a successful match.
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
3287
// Report an out-of-range/invalid export target at the current token location.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
3291
3292OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
3293 uint8_t &Val) {
3294 if (Str == "null") {
3295 Val = 9;
3296 return MatchOperand_Success;
3297 }
3298
3299 if (Str.startswith("mrt")) {
3300 Str = Str.drop_front(3);
3301 if (Str == "z") { // == mrtz
3302 Val = 8;
3303 return MatchOperand_Success;
3304 }
3305
3306 if (Str.getAsInteger(10, Val))
3307 return MatchOperand_ParseFail;
3308
3309 if (Val > 7)
3310 errorExpTgt();
3311
3312 return MatchOperand_Success;
3313 }
3314
3315 if (Str.startswith("pos")) {
3316 Str = Str.drop_front(3);
3317 if (Str.getAsInteger(10, Val))
3318 return MatchOperand_ParseFail;
3319
3320 if (Val > 3)
3321 errorExpTgt();
3322
3323 Val += 12;
3324 return MatchOperand_Success;
3325 }
3326
3327 if (Str.startswith("param")) {
3328 Str = Str.drop_front(5);
3329 if (Str.getAsInteger(10, Val))
3330 return MatchOperand_ParseFail;
3331
3332 if (Val >= 32)
3333 errorExpTgt();
3334
3335 Val += 32;
3336 return MatchOperand_Success;
3337 }
3338
3339 if (Str.startswith("invalid_target_")) {
3340 Str = Str.drop_front(15);
3341 if (Str.getAsInteger(10, Val))
3342 return MatchOperand_ParseFail;
3343
3344 errorExpTgt();
3345 return MatchOperand_Success;
3346 }
3347
3348 return MatchOperand_NoMatch;
3349}
3350
3351OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3352 uint8_t Val;
3353 StringRef Str = Parser.getTok().getString();
3354
3355 auto Res = parseExpTgtImpl(Str, Val);
3356 if (Res != MatchOperand_Success)
3357 return Res;
3358
3359 SMLoc S = Parser.getTok().getLoc();
3360 Parser.Lex();
3361
3362 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3363 AMDGPUOperand::ImmTyExpTgt));
3364 return MatchOperand_Success;
3365}
3366
/// Parse the operand of s_sendmsg: either a raw 16-bit immediate or a
/// sendmsg(...) construct whose message/operation/stream-id fields are
/// validated and packed into a 16-bit immediate. Diagnostics do not abort
/// parsing — an immediate operand is always created.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    // The do/while(false) lets validation bail out with 'break' after the
    // first error while still creating the operand below.
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
3445
// True iff this operand was parsed as a sendmsg(...) immediate.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
3449
Tom Stellard45bb48e2015-06-13 03:28:10 +00003450//===----------------------------------------------------------------------===//
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00003451// parser helpers
3452//===----------------------------------------------------------------------===//
3453
3454bool
3455AMDGPUAsmParser::trySkipId(const StringRef Id) {
3456 if (getLexer().getKind() == AsmToken::Identifier &&
3457 Parser.getTok().getString() == Id) {
3458 Parser.Lex();
3459 return true;
3460 }
3461 return false;
3462}
3463
3464bool
3465AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
3466 if (getLexer().getKind() == Kind) {
3467 Parser.Lex();
3468 return true;
3469 }
3470 return false;
3471}
3472
3473bool
3474AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
3475 const StringRef ErrMsg) {
3476 if (!trySkipToken(Kind)) {
3477 Error(Parser.getTok().getLoc(), ErrMsg);
3478 return false;
3479 }
3480 return true;
3481}
3482
3483bool
3484AMDGPUAsmParser::parseExpr(int64_t &Imm) {
3485 return !getParser().parseAbsoluteExpression(Imm);
3486}
3487
3488bool
3489AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
3490 SMLoc S = Parser.getTok().getLoc();
3491 if (getLexer().getKind() == AsmToken::String) {
3492 Val = Parser.getTok().getStringContents();
3493 Parser.Lex();
3494 return true;
3495 } else {
3496 Error(S, ErrMsg);
3497 return false;
3498 }
3499}
3500
3501//===----------------------------------------------------------------------===//
3502// swizzle
3503//===----------------------------------------------------------------------===//
3504
3505LLVM_READNONE
3506static unsigned
3507encodeBitmaskPerm(const unsigned AndMask,
3508 const unsigned OrMask,
3509 const unsigned XorMask) {
3510 using namespace llvm::AMDGPU::Swizzle;
3511
3512 return BITMASK_PERM_ENC |
3513 (AndMask << BITMASK_AND_SHIFT) |
3514 (OrMask << BITMASK_OR_SHIFT) |
3515 (XorMask << BITMASK_XOR_SHIFT);
3516}
3517
3518bool
3519AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
3520 const unsigned MinVal,
3521 const unsigned MaxVal,
3522 const StringRef ErrMsg) {
3523 for (unsigned i = 0; i < OpNum; ++i) {
3524 if (!skipToken(AsmToken::Comma, "expected a comma")){
3525 return false;
3526 }
3527 SMLoc ExprLoc = Parser.getTok().getLoc();
3528 if (!parseExpr(Op[i])) {
3529 return false;
3530 }
3531 if (Op[i] < MinVal || Op[i] > MaxVal) {
3532 Error(ExprLoc, ErrMsg);
3533 return false;
3534 }
3535 }
3536
3537 return true;
3538}
3539
3540bool
3541AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
3542 using namespace llvm::AMDGPU::Swizzle;
3543
3544 int64_t Lane[LANE_NUM];
3545 if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
3546 "expected a 2-bit lane id")) {
3547 Imm = QUAD_PERM_ENC;
3548 for (auto i = 0; i < LANE_NUM; ++i) {
3549 Imm |= Lane[i] << (LANE_SHIFT * i);
3550 }
3551 return true;
3552 }
3553 return false;
3554}
3555
// swizzle(BROADCAST, <group size>, <lane id>): every lane of a group reads
// from the given lane of that group. Group size must be a power of two in
// [2,32]; lane id must fit inside the group.
bool
AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;
  int64_t LaneIdx;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32,
                            "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }
  if (parseSwizzleOperands(1, &LaneIdx,
                           0, GroupSize - 1,
                           "lane id must be in the interval [0,group size - 1]")) {
    // AND mask keeps the bits selecting the group, clearing the in-group
    // bits; OR mask then supplies the broadcast lane index.
    Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
    return true;
  }
  return false;
}
3581
3582bool
3583AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
3584 using namespace llvm::AMDGPU::Swizzle;
3585
3586 SMLoc S = Parser.getTok().getLoc();
3587 int64_t GroupSize;
3588
3589 if (!parseSwizzleOperands(1, &GroupSize,
3590 2, 32, "group size must be in the interval [2,32]")) {
3591 return false;
3592 }
3593 if (!isPowerOf2_64(GroupSize)) {
3594 Error(S, "group size must be a power of two");
3595 return false;
3596 }
3597
3598 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
3599 return true;
3600}
3601
3602bool
3603AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
3604 using namespace llvm::AMDGPU::Swizzle;
3605
3606 SMLoc S = Parser.getTok().getLoc();
3607 int64_t GroupSize;
3608
3609 if (!parseSwizzleOperands(1, &GroupSize,
3610 1, 16, "group size must be in the interval [1,16]")) {
3611 return false;
3612 }
3613 if (!isPowerOf2_64(GroupSize)) {
3614 Error(S, "group size must be a power of two");
3615 return false;
3616 }
3617
3618 Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
3619 return true;
3620}
3621
// swizzle(BITMASK_PERM, "<mask>"): the 5-character mask describes, MSB
// first, how each bit of the lane id is transformed:
//   '0' - force to 0, '1' - force to 1, 'p' - preserve, 'i' - invert.
// The characters are compiled into AND/OR/XOR masks for the encoding.
bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  for (size_t i = 0; i < Ctl.size(); ++i) {
    // The first mask character controls the most significant lane-id bit.
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}
3668
3669bool
3670AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
3671
3672 SMLoc OffsetLoc = Parser.getTok().getLoc();
3673
3674 if (!parseExpr(Imm)) {
3675 return false;
3676 }
3677 if (!isUInt<16>(Imm)) {
3678 Error(OffsetLoc, "expected a 16-bit offset");
3679 return false;
3680 }
3681 return true;
3682}
3683
3684bool
3685AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
3686 using namespace llvm::AMDGPU::Swizzle;
3687
3688 if (skipToken(AsmToken::LParen, "expected a left parentheses")) {
3689
3690 SMLoc ModeLoc = Parser.getTok().getLoc();
3691 bool Ok = false;
3692
3693 if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
3694 Ok = parseSwizzleQuadPerm(Imm);
3695 } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
3696 Ok = parseSwizzleBitmaskPerm(Imm);
3697 } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
3698 Ok = parseSwizzleBroadcast(Imm);
3699 } else if (trySkipId(IdSymbolic[ID_SWAP])) {
3700 Ok = parseSwizzleSwap(Imm);
3701 } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
3702 Ok = parseSwizzleReverse(Imm);
3703 } else {
3704 Error(ModeLoc, "expected a swizzle mode");
3705 }
3706
3707 return Ok && skipToken(AsmToken::RParen, "expected a closing parentheses");
3708 }
3709
3710 return false;
3711}
3712
3713OperandMatchResultTy
3714AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
3715 SMLoc S = Parser.getTok().getLoc();
3716 int64_t Imm = 0;
3717
3718 if (trySkipId("offset")) {
3719
3720 bool Ok = false;
3721 if (skipToken(AsmToken::Colon, "expected a colon")) {
3722 if (trySkipId("swizzle")) {
3723 Ok = parseSwizzleMacro(Imm);
3724 } else {
3725 Ok = parseSwizzleOffset(Imm);
3726 }
3727 }
3728
3729 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));
3730
3731 return Ok? MatchOperand_Success : MatchOperand_ParseFail;
3732 } else {
3733 return MatchOperand_NoMatch;
3734 }
3735}
3736
// True iff this operand was parsed as a swizzle offset immediate.
bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}
3741
3742//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003743// sopp branch targets
3744//===----------------------------------------------------------------------===//
3745
Alex Bradbury58eba092016-11-01 16:32:05 +00003746OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003747AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
3748 SMLoc S = Parser.getTok().getLoc();
3749
3750 switch (getLexer().getKind()) {
3751 default: return MatchOperand_ParseFail;
3752 case AsmToken::Integer: {
3753 int64_t Imm;
3754 if (getParser().parseAbsoluteExpression(Imm))
3755 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003756 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003757 return MatchOperand_Success;
3758 }
3759
3760 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003761 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00003762 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
3763 Parser.getTok().getString()), getContext()), S));
3764 Parser.Lex();
3765 return MatchOperand_Success;
3766 }
3767}
3768
3769//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003770// mubuf
3771//===----------------------------------------------------------------------===//
3772
// Default glc modifier: absent (0), with no source location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
3776
// Default slc modifier: absent (0), with no source location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
3780
// Default tfe modifier: absent (0), with no source location.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
3784
/// Convert parsed MUBUF operands into MCInst operands, then append the
/// optional modifiers in the fixed order the opcode expects
/// (offset, glc, slc, tfe). For returning atomics the $vdata_in operand is
/// duplicated to also serve as $vdata.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true); // IsAtomicReturn implies IsAtomic.

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments: remember where each immediate kind was seen.
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
3830
// Convert parsed MTBUF operands into MCInst operands.
// Same scheme as cvtMubufImpl, but MTBUF additionally carries the dfmt/nfmt
// format modifiers; the trailing fixed emit order is:
// offset, dfmt, nfmt, glc, slc, tfe.
void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments: record position, emit later in fixed order.
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx,
                        AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDFMT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyNFMT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
3868
Tom Stellard45bb48e2015-06-13 03:28:10 +00003869//===----------------------------------------------------------------------===//
3870// mimg
3871//===----------------------------------------------------------------------===//
3872
// Convert parsed MIMG operands into MCInst operands.
// Defs come first; for atomics the first parsed register is also re-added
// as the tied data source. Remaining reg/imm operands are appended in parse
// order, while named modifiers are emitted afterwards in the fixed order
// dmask, unorm, glc, da, r128, tfe, lwe, slc.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      // Parser should only produce regs, imms or imm modifiers here.
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
3911
3912void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton10ac2fd2017-07-07 15:21:52 +00003913 cvtMIMG(Inst, Operands, true);
Sam Kolton1bdcef72016-05-23 09:59:02 +00003914}
3915
Sam Kolton5f10a132016-05-06 11:31:17 +00003916AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003917 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
Sam Kolton5f10a132016-05-06 11:31:17 +00003918}
3919
3920AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003921 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
Sam Kolton5f10a132016-05-06 11:31:17 +00003922}
3923
3924AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003925 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
Sam Kolton5f10a132016-05-06 11:31:17 +00003926}
3927
3928AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003929 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
Sam Kolton5f10a132016-05-06 11:31:17 +00003930}
3931
3932AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003933 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
Sam Kolton5f10a132016-05-06 11:31:17 +00003934}
3935
Tom Stellard45bb48e2015-06-13 03:28:10 +00003936//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00003937// smrd
3938//===----------------------------------------------------------------------===//
3939
Artem Tamazov54bfd542016-10-31 16:07:39 +00003940bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00003941 return isImm() && isUInt<8>(getImm());
3942}
3943
Artem Tamazov54bfd542016-10-31 16:07:39 +00003944bool AMDGPUOperand::isSMRDOffset20() const {
3945 return isImm() && isUInt<20>(getImm());
3946}
3947
Tom Stellard217361c2015-08-06 19:28:38 +00003948bool AMDGPUOperand::isSMRDLiteralOffset() const {
3949 // 32-bit literals are only supported on CI and we only want to use them
3950 // when the offset is > 8-bits.
3951 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
3952}
3953
Artem Tamazov54bfd542016-10-31 16:07:39 +00003954AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
3955 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
3956}
3957
3958AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003959 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00003960}
3961
3962AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003963 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00003964}
3965
Matt Arsenaultfd023142017-06-12 15:55:58 +00003966AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
3967 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
3968}
3969
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003970AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
3971 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
3972}
3973
Tom Stellard217361c2015-08-06 19:28:38 +00003974//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003975// vop3
3976//===----------------------------------------------------------------------===//
3977
3978static bool ConvertOmodMul(int64_t &Mul) {
3979 if (Mul != 1 && Mul != 2 && Mul != 4)
3980 return false;
3981
3982 Mul >>= 1;
3983 return true;
3984}
3985
3986static bool ConvertOmodDiv(int64_t &Div) {
3987 if (Div == 1) {
3988 Div = 0;
3989 return true;
3990 }
3991
3992 if (Div == 2) {
3993 Div = 3;
3994 return true;
3995 }
3996
3997 return false;
3998}
3999
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004000static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
4001 if (BoundCtrl == 0) {
4002 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004003 return true;
Matt Arsenault12c53892016-11-15 19:58:54 +00004004 }
4005
4006 if (BoundCtrl == -1) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004007 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004008 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004009 }
Matt Arsenault12c53892016-11-15 19:58:54 +00004010
Tom Stellard45bb48e2015-06-13 03:28:10 +00004011 return false;
4012}
4013
// Note: the order in this table matches the order of operands in AsmString.
// Each entry is {assembler name, immediate type, IsBit, ConvertResult}:
// IsBit marks bare-flag modifiers parsed via parseNamedBit, and
// ConvertResult (may be null) post-processes the parsed integer.
// Dispatch over these fields happens in parseOptionalOperand().
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"dfmt",    AMDGPUOperand::ImmTyDFMT, false, nullptr},
  {"nfmt",    AMDGPUOperand::ImmTyNFMT, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"high",    AMDGPUOperand::ImmTyHigh, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA,    true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128,  true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE,   true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"compr", AMDGPUOperand::ImmTyExpCompr, true, nullptr },
  {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
  {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
  {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
  {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
  {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00004050
Alex Bradbury58eba092016-11-01 16:32:05 +00004051OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Sam Kolton11de3702016-05-24 12:38:33 +00004052 OperandMatchResultTy res;
4053 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
4054 // try to parse any optional operand here
4055 if (Op.IsBit) {
4056 res = parseNamedBit(Op.Name, Operands, Op.Type);
4057 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
4058 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00004059 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
4060 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
4061 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
4062 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00004063 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
4064 res = parseSDWADstUnused(Operands);
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00004065 } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
4066 Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
4067 Op.Type == AMDGPUOperand::ImmTyNegLo ||
4068 Op.Type == AMDGPUOperand::ImmTyNegHi) {
4069 res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
4070 Op.ConvertResult);
Sam Kolton11de3702016-05-24 12:38:33 +00004071 } else {
4072 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
4073 }
4074 if (res != MatchOperand_NoMatch) {
4075 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00004076 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00004077 }
4078 return MatchOperand_NoMatch;
4079}
4080
Matt Arsenault12c53892016-11-15 19:58:54 +00004081OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004082 StringRef Name = Parser.getTok().getString();
4083 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00004084 return parseIntWithPrefix("mul", Operands,
4085 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004086 }
Matt Arsenault12c53892016-11-15 19:58:54 +00004087
4088 if (Name == "div") {
4089 return parseIntWithPrefix("div", Operands,
4090 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
4091 }
4092
4093 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00004094}
4095
// Convert a VOP3 instruction that carries a destination op_sel bit.
// The op_sel bit one past the last source selects the destination half;
// it is folded into src0_modifiers as DST_OP_SEL after the common VOP3P
// conversion has populated op_sel and the per-source modifier operands.
void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  // Count how many named source operands this opcode has (1..3).
  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  // Bit SrcNum of op_sel is the destination select; stash it in
  // src0_modifiers as DST_OP_SEL.
  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}
4119
Sam Koltona3ec5c12016-10-07 14:46:06 +00004120static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
4121 // 1. This operand is input modifiers
4122 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
4123 // 2. This is not last operand
4124 && Desc.NumOperands > (OpNum + 1)
4125 // 3. Next operand is register class
4126 && Desc.OpInfo[OpNum + 1].RegClass != -1
4127 // 4. Next register is not tied to any other operand
4128 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
4129}
4130
// Convert parsed VOP3 interpolation operands into MCInst operands.
// Interp slot/attr/attr-channel operands are emitted as raw immediates;
// modifier-carrying sources are expanded to (modifiers, value) pairs; the
// optional high/clamp/omod immediates are appended only when the opcode
// actually has the corresponding named operand.
void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands) {

  OptionalImmIndexMap OptionalIdx;
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source with FP input modifiers: emits two MCInst operands.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isInterpSlot() ||
               Op.isInterpAttr() ||
               Op.isAttrChan()) {
      Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }
}
4169
// Convert parsed VOP3 operands into MCInst operands.
// Two paths: opcodes with src0_modifiers expand each source to a
// (modifiers, value) pair; opcodes without them append sources directly.
// Optional clamp/omod immediates are appended only when the opcode has the
// corresponding named operand. Indices of optional immediates seen during
// the scan are recorded in OptionalIdx (also an output for callers).
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
                              OptionalImmIndexMap &OptionalIdx) {
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
    // This instruction has src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
      } else if (Op.isImmModifier()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else if (Op.isRegOrImm()) {
        Op.addRegOrImmOperands(Inst, 1);
      } else {
        llvm_unreachable("unhandled operand type");
      }
    }
  } else {
    // No src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (Op.isMod()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        Op.addRegOrImmOperands(Inst, 1);
      }
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Opc == AMDGPU::V_MAC_F32_e64_si || Opc == AMDGPU::V_MAC_F32_e64_vi ||
      Opc == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
4227
Sam Kolton10ac2fd2017-07-07 15:21:52 +00004228void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00004229 OptionalImmIndexMap OptionalIdx;
Sam Kolton10ac2fd2017-07-07 15:21:52 +00004230 cvtVOP3(Inst, Operands, OptionalIdx);
Dmitry Preobrazhenskyc512d442017-03-27 15:57:17 +00004231}
4232
// Convert parsed VOP3P operands into MCInst operands.
// The operands are first converted as a plain VOP3 instruction; then the
// packed-math modifiers (op_sel, op_sel_hi, neg_lo, neg_hi) are appended
// and their per-source bits folded into srcN_modifiers.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;

  cvtVOP3(Inst, Operands, OptIdx);

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers
  int Opc = Inst.getOpcode();

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    // op_sel_hi defaults to -1 (all bits set) when omitted.
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi, -1);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // OR bit J of each packed-modifier mask into the J-th source's
  // modifiers operand.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}
4303
Sam Koltondfa29f72016-03-09 12:29:31 +00004304//===----------------------------------------------------------------------===//
4305// dpp
4306//===----------------------------------------------------------------------===//
4307
4308bool AMDGPUOperand::isDPPCtrl() const {
4309 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
4310 if (result) {
4311 int64_t Imm = getImm();
4312 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
4313 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
4314 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
4315 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
4316 (Imm == 0x130) ||
4317 (Imm == 0x134) ||
4318 (Imm == 0x138) ||
4319 (Imm == 0x13c) ||
4320 (Imm == 0x140) ||
4321 (Imm == 0x141) ||
4322 (Imm == 0x142) ||
4323 (Imm == 0x143);
4324 }
4325 return false;
4326}
4327
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00004328bool AMDGPUOperand::isGPRIdxMode() const {
4329 return isImm() && isUInt<4>(getImm());
4330}
4331
Dmitry Preobrazhenskyc7d35a02017-04-26 15:34:19 +00004332bool AMDGPUOperand::isS16Imm() const {
4333 return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
4334}
4335
4336bool AMDGPUOperand::isU16Imm() const {
4337 return isImm() && isUInt<16>(getImm());
4338}
4339
// Parse a dpp_ctrl operand and fold it into the 9-bit encoded immediate.
// Accepted forms:
//   row_mirror / row_half_mirror        -> 0x140 / 0x141
//   quad_perm:[a,b,c,d]  (each 0..3)    -> 0x000..0x0ff (2 bits per lane)
//   row_shl:n / row_shr:n / row_ror:n   -> 0x100/0x110/0x120 | n (n = 1..15)
//   wave_shl:1 / wave_rol:1 /
//   wave_shr:1 / wave_ror:1             -> 0x130 / 0x134 / 0x138 / 0x13C
//   row_bcast:15 / row_bcast:31         -> 0x142 / 0x143
// Returns NoMatch on an unrelated identifier (so other parsers can try),
// ParseFail on a malformed value.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors occupy bits [3:2], [5:4], [7:6].
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Combine the prefix's base code with the (range-checked) selector.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
4439
Sam Kolton5f10a132016-05-06 11:31:17 +00004440AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004441 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00004442}
4443
Sam Kolton5f10a132016-05-06 11:31:17 +00004444AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004445 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00004446}
4447
Sam Kolton5f10a132016-05-06 11:31:17 +00004448AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004449 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00004450}
4451
4452void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00004453 OptionalImmIndexMap OptionalIdx;
4454
4455 unsigned I = 1;
4456 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
4457 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
4458 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
4459 }
4460
4461 for (unsigned E = Operands.size(); I != E; ++I) {
4462 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
4463 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00004464 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00004465 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00004466 // Skip it.
4467 continue;
4468 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00004469 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00004470 } else if (Op.isDPPCtrl()) {
4471 Op.addImmOperands(Inst, 1);
4472 } else if (Op.isImm()) {
4473 // Handle optional arguments
4474 OptionalIdx[Op.getImmTy()] = I;
4475 } else {
4476 llvm_unreachable("Invalid operand type");
4477 }
4478 }
4479
Sam Koltondfa29f72016-03-09 12:29:31 +00004480 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
4481 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
4482 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Koltona3ec5c12016-10-07 14:46:06 +00004483
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004484 // special case v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00004485 // it has src2 register operand that is tied to dst operand
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004486 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
4487 Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004488 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00004489 std::advance(
4490 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00004491 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
4492 }
Sam Koltondfa29f72016-03-09 12:29:31 +00004493}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00004494
Sam Kolton3025e7f2016-04-26 13:33:56 +00004495//===----------------------------------------------------------------------===//
4496// sdwa
4497//===----------------------------------------------------------------------===//
4498
Alex Bradbury58eba092016-11-01 16:32:05 +00004499OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00004500AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
4501 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004502 using namespace llvm::AMDGPU::SDWA;
4503
Sam Kolton3025e7f2016-04-26 13:33:56 +00004504 SMLoc S = Parser.getTok().getLoc();
4505 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00004506 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004507
Sam Kolton05ef1c92016-06-03 10:27:37 +00004508 res = parseStringWithPrefix(Prefix, Value);
4509 if (res != MatchOperand_Success) {
4510 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00004511 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00004512
Sam Kolton3025e7f2016-04-26 13:33:56 +00004513 int64_t Int;
4514 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00004515 .Case("BYTE_0", SdwaSel::BYTE_0)
4516 .Case("BYTE_1", SdwaSel::BYTE_1)
4517 .Case("BYTE_2", SdwaSel::BYTE_2)
4518 .Case("BYTE_3", SdwaSel::BYTE_3)
4519 .Case("WORD_0", SdwaSel::WORD_0)
4520 .Case("WORD_1", SdwaSel::WORD_1)
4521 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00004522 .Default(0xffffffff);
4523 Parser.Lex(); // eat last token
4524
4525 if (Int == 0xffffffff) {
4526 return MatchOperand_ParseFail;
4527 }
4528
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004529 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00004530 return MatchOperand_Success;
4531}
4532
Alex Bradbury58eba092016-11-01 16:32:05 +00004533OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00004534AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004535 using namespace llvm::AMDGPU::SDWA;
4536
Sam Kolton3025e7f2016-04-26 13:33:56 +00004537 SMLoc S = Parser.getTok().getLoc();
4538 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00004539 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00004540
4541 res = parseStringWithPrefix("dst_unused", Value);
4542 if (res != MatchOperand_Success) {
4543 return res;
4544 }
4545
4546 int64_t Int;
4547 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00004548 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
4549 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
4550 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00004551 .Default(0xffffffff);
4552 Parser.Lex(); // eat last token
4553
4554 if (Int == 0xffffffff) {
4555 return MatchOperand_ParseFail;
4556 }
4557
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004558 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00004559 return MatchOperand_Success;
4560}
4561
/// Convert a parsed VOP1 SDWA instruction through the shared cvtSDWA() path.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
4565
/// Convert a parsed VOP2 SDWA instruction through the shared cvtSDWA() path.
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
4569
/// Convert a parsed VOP2b SDWA instruction. VOP2b forms carry a "vcc" token
/// in their syntax, so skipVcc is forced to true (see cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}
4573
/// Convert a parsed VOPC SDWA instruction. The "vcc" token is skipped only
/// on VI (isVI() controls cvtSDWA's skipVcc parameter).
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
4577
/// Convert a parsed SDWA instruction of the given basic type (VOP1, VOP2 or
/// VOPC) into \p Inst, supplying defaults for any optional SDWA modifiers
/// (clamp, omod where present, dst_sel, dst_unused, src0_sel, src1_sel) that
/// were not written explicitly. When \p skipVcc is set, "vcc" register
/// operands that are part of the VOP2b/VOPC token syntax are not emitted
/// into the MCInst.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool skipVcc) {
  using namespace llvm::AMDGPU::SDWA;
  // Maps each optional-immediate kind to the index of the parsed operand
  // that supplied it, if any.
  OptionalImmIndexMap OptionalIdx;
  bool skippedVcc = false;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Destination registers come first in the parsed operand list.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (skipVcc && !skippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it if it's 2nd (e.g. v_add_i32_sdwa v1, vcc, v2, v3)
      // or 4th (v_addc_u32_sdwa v1, vcc, v2, v3, vcc) operand.
      // Skip VCC only if we didn't skip it on previous iteration.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        skippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        skippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    // Reset so only directly consecutive vcc tokens are treated specially.
    skippedVcc = false;
  }

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
      Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // v_nop_sdwa_vi/gfx9 has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // Only add omod if this opcode actually has an omod operand.
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      // Only add omod if this opcode actually has an omod operand.
      if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
        addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
      }
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      // VOPC has no omod and no dst_sel/dst_unused (it writes vcc).
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00004664
/// Force static initialization.
/// Registers this asm parser with both AMDGPU target entries
/// (getTheAMDGPUTarget() and getTheGCNTarget()) in the target registry.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
4670
4671#define GET_REGISTER_MATCHER
4672#define GET_MATCHER_IMPLEMENTATION
4673#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00004674
Sam Kolton11de3702016-05-24 12:38:33 +00004675// This fuction should be defined after auto-generated include so that we have
4676// MatchClassKind enum defined
4677unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
4678 unsigned Kind) {
4679 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00004680 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00004681 // operand. This method checks if we are given immediate operand but expect to
4682 // get corresponding token.
4683 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
4684 switch (Kind) {
4685 case MCK_addr64:
4686 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
4687 case MCK_gds:
4688 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
4689 case MCK_glc:
4690 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
4691 case MCK_idxen:
4692 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
4693 case MCK_offen:
4694 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004695 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00004696 // When operands have expression values, they will return true for isToken,
4697 // because it is not possible to distinguish between a token and an
4698 // expression at parse time. MatchInstructionImpl() will always try to
4699 // match an operand as a token, when isToken returns true, and when the
4700 // name of the expression is not a valid token, the match will fail,
4701 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004702 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
4703 case MCK_SSrcF32:
4704 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00004705 case MCK_SoppBrTarget:
4706 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004707 case MCK_VReg32OrOff:
4708 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00004709 case MCK_InterpSlot:
4710 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
4711 case MCK_Attr:
4712 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
4713 case MCK_AttrChan:
4714 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004715 default:
4716 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00004717 }
4718}