blob: 440d0faae483290712b43047a9de23c96daa49f5 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000018#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000019#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000021#include "llvm/ADT/StringSwitch.h"
22#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000023#include "llvm/CodeGen/MachineValueType.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/MC/MCContext.h"
25#include "llvm/MC/MCExpr.h"
26#include "llvm/MC/MCInst.h"
27#include "llvm/MC/MCInstrInfo.h"
28#include "llvm/MC/MCParser/MCAsmLexer.h"
29#include "llvm/MC/MCParser/MCAsmParser.h"
30#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000031#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000032#include "llvm/MC/MCRegisterInfo.h"
33#include "llvm/MC/MCStreamer.h"
34#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000036#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000037#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/Support/SourceMgr.h"
39#include "llvm/Support/TargetRegistry.h"
40#include "llvm/Support/raw_ostream.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000041#include "llvm/Support/MathExtras.h"
Artem Tamazovebe71ce2016-05-06 17:48:48 +000042
Tom Stellard45bb48e2015-06-13 03:28:10 +000043using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000044using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000045
46namespace {
47
Sam Kolton1eeb11b2016-09-09 14:44:04 +000048class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000049struct OptionalOperand;
50
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000051enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
52
Sam Kolton1eeb11b2016-09-09 14:44:04 +000053//===----------------------------------------------------------------------===//
54// Operand
55//===----------------------------------------------------------------------===//
56
Tom Stellard45bb48e2015-06-13 03:28:10 +000057class AMDGPUOperand : public MCParsedAsmOperand {
58 enum KindTy {
59 Token,
60 Immediate,
61 Register,
62 Expression
63 } Kind;
64
65 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000066 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
68public:
Sam Kolton1eeb11b2016-09-09 14:44:04 +000069 AMDGPUOperand(enum KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
70 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000071
Sam Kolton5f10a132016-05-06 11:31:17 +000072 typedef std::unique_ptr<AMDGPUOperand> Ptr;
73
Sam Kolton945231a2016-06-10 09:57:59 +000074 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000075 bool Abs = false;
76 bool Neg = false;
77 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000078
79 bool hasFPModifiers() const { return Abs || Neg; }
80 bool hasIntModifiers() const { return Sext; }
81 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
82
83 int64_t getFPModifiersOperand() const {
84 int64_t Operand = 0;
85 Operand |= Abs ? SISrcMods::ABS : 0;
86 Operand |= Neg ? SISrcMods::NEG : 0;
87 return Operand;
88 }
89
90 int64_t getIntModifiersOperand() const {
91 int64_t Operand = 0;
92 Operand |= Sext ? SISrcMods::SEXT : 0;
93 return Operand;
94 }
95
96 int64_t getModifiersOperand() const {
97 assert(!(hasFPModifiers() && hasIntModifiers())
98 && "fp and int modifiers should not be used simultaneously");
99 if (hasFPModifiers()) {
100 return getFPModifiersOperand();
101 } else if (hasIntModifiers()) {
102 return getIntModifiersOperand();
103 } else {
104 return 0;
105 }
106 }
107
108 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
109 };
110
Tom Stellard45bb48e2015-06-13 03:28:10 +0000111 enum ImmTy {
112 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000113 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000114 ImmTyOffen,
115 ImmTyIdxen,
116 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000117 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000118 ImmTyOffset0,
119 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000120 ImmTyGLC,
121 ImmTySLC,
122 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000123 ImmTyClampSI,
124 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000125 ImmTyDppCtrl,
126 ImmTyDppRowMask,
127 ImmTyDppBankMask,
128 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000129 ImmTySdwaDstSel,
130 ImmTySdwaSrc0Sel,
131 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000132 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000133 ImmTyDMask,
134 ImmTyUNorm,
135 ImmTyDA,
136 ImmTyR128,
137 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000138 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000139 ImmTyExpCompr,
140 ImmTyExpVM,
Artem Tamazovd6468662016-04-25 14:13:51 +0000141 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000142 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000143 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000144 };
145
146 struct TokOp {
147 const char *Data;
148 unsigned Length;
149 };
150
151 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000152 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000153 ImmTy Type;
154 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000155 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000156 };
157
158 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000159 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000160 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000161 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000162 };
163
164 union {
165 TokOp Tok;
166 ImmOp Imm;
167 RegOp Reg;
168 const MCExpr *Expr;
169 };
170
Tom Stellard45bb48e2015-06-13 03:28:10 +0000171 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000172 if (Kind == Token)
173 return true;
174
175 if (Kind != Expression || !Expr)
176 return false;
177
178 // When parsing operands, we can't always tell if something was meant to be
179 // a token, like 'gds', or an expression that references a global variable.
180 // In this case, we assume the string is an expression, and if we need to
181 // interpret is a token, then we treat the symbol name as the token.
182 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000183 }
184
185 bool isImm() const override {
186 return Kind == Immediate;
187 }
188
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000189 bool isInlinableImm(MVT type) const;
190 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000191
Tom Stellard45bb48e2015-06-13 03:28:10 +0000192 bool isRegKind() const {
193 return Kind == Register;
194 }
195
196 bool isReg() const override {
Sam Kolton945231a2016-06-10 09:57:59 +0000197 return isRegKind() && !Reg.Mods.hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000198 }
199
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000200 bool isRegOrImmWithInputMods(MVT type) const {
201 return isRegKind() || isInlinableImm(type);
202 }
203
204 bool isRegOrImmWithInt32InputMods() const {
205 return isRegOrImmWithInputMods(MVT::i32);
206 }
207
208 bool isRegOrImmWithInt64InputMods() const {
209 return isRegOrImmWithInputMods(MVT::i64);
210 }
211
212 bool isRegOrImmWithFP32InputMods() const {
213 return isRegOrImmWithInputMods(MVT::f32);
214 }
215
216 bool isRegOrImmWithFP64InputMods() const {
217 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000218 }
219
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000220 bool isVReg32OrOff() const {
221 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
222 }
223
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000224 bool isImmTy(ImmTy ImmT) const {
225 return isImm() && Imm.Type == ImmT;
226 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000227
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000228 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000229 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000230 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000231
Sam Kolton945231a2016-06-10 09:57:59 +0000232 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
233 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
234 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000235 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
236 bool isDA() const { return isImmTy(ImmTyDA); }
237 bool isR128() const { return isImmTy(ImmTyUNorm); }
238 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000239 bool isOff() const { return isImmTy(ImmTyOff); }
240 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000241 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
242 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000243 bool isOffen() const { return isImmTy(ImmTyOffen); }
244 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
245 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
246 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
247 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
248 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000249 bool isGDS() const { return isImmTy(ImmTyGDS); }
250 bool isGLC() const { return isImmTy(ImmTyGLC); }
251 bool isSLC() const { return isImmTy(ImmTySLC); }
252 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000253 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
254 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
255 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
256 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
257 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
258 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
259 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000260
Sam Kolton945231a2016-06-10 09:57:59 +0000261 bool isMod() const {
262 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000263 }
264
265 bool isRegOrImm() const {
266 return isReg() || isImm();
267 }
268
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000269 bool isRegClass(unsigned RCID) const;
270
271 bool isSCSrcB32() const {
272 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000273 }
274
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000275 bool isSCSrcB64() const {
276 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000277 }
278
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000279 bool isSCSrcF32() const {
280 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000281 }
282
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000283 bool isSCSrcF64() const {
284 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000285 }
286
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000287 bool isSSrcB32() const {
288 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
289 }
290
291 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000292 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
293 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000294 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000295 }
296
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000297 bool isSSrcF32() const {
298 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000299 }
300
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000301 bool isSSrcF64() const {
302 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000303 }
304
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000305 bool isVCSrcB32() const {
306 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000307 }
308
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000309 bool isVCSrcB64() const {
310 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
311 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000312
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000313 bool isVCSrcF32() const {
314 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
315 }
316
317 bool isVCSrcF64() const {
318 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
319 }
320
321 bool isVSrcB32() const {
322 return isVCSrcF32() || isLiteralImm(MVT::i32);
323 }
324
325 bool isVSrcB64() const {
326 return isVCSrcF64() || isLiteralImm(MVT::i64);
327 }
328
329 bool isVSrcF32() const {
330 return isVCSrcF32() || isLiteralImm(MVT::f32);
331 }
332
333 bool isVSrcF64() const {
334 return isVCSrcF64() || isLiteralImm(MVT::f64);
335 }
336
337 bool isKImmFP32() const {
338 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000339 }
340
341 bool isMem() const override {
342 return false;
343 }
344
345 bool isExpr() const {
346 return Kind == Expression;
347 }
348
349 bool isSoppBrTarget() const {
350 return isExpr() || isImm();
351 }
352
Sam Kolton945231a2016-06-10 09:57:59 +0000353 bool isSWaitCnt() const;
354 bool isHwreg() const;
355 bool isSendMsg() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000356 bool isSMRDOffset8() const;
357 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000358 bool isSMRDLiteralOffset() const;
359 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000360 bool isGPRIdxMode() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000361
Tom Stellard89049702016-06-15 02:54:14 +0000362 StringRef getExpressionAsToken() const {
363 assert(isExpr());
364 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
365 return S->getSymbol().getName();
366 }
367
368
Sam Kolton945231a2016-06-10 09:57:59 +0000369 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000370 assert(isToken());
371
372 if (Kind == Expression)
373 return getExpressionAsToken();
374
Sam Kolton945231a2016-06-10 09:57:59 +0000375 return StringRef(Tok.Data, Tok.Length);
376 }
377
378 int64_t getImm() const {
379 assert(isImm());
380 return Imm.Val;
381 }
382
383 enum ImmTy getImmTy() const {
384 assert(isImm());
385 return Imm.Type;
386 }
387
388 unsigned getReg() const override {
389 return Reg.RegNo;
390 }
391
Tom Stellard45bb48e2015-06-13 03:28:10 +0000392 SMLoc getStartLoc() const override {
393 return StartLoc;
394 }
395
Peter Collingbourne0da86302016-10-10 22:49:37 +0000396 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000397 return EndLoc;
398 }
399
Sam Kolton945231a2016-06-10 09:57:59 +0000400 Modifiers getModifiers() const {
401 assert(isRegKind() || isImmTy(ImmTyNone));
402 return isRegKind() ? Reg.Mods : Imm.Mods;
403 }
404
405 void setModifiers(Modifiers Mods) {
406 assert(isRegKind() || isImmTy(ImmTyNone));
407 if (isRegKind())
408 Reg.Mods = Mods;
409 else
410 Imm.Mods = Mods;
411 }
412
413 bool hasModifiers() const {
414 return getModifiers().hasModifiers();
415 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000416
Sam Kolton945231a2016-06-10 09:57:59 +0000417 bool hasFPModifiers() const {
418 return getModifiers().hasFPModifiers();
419 }
420
421 bool hasIntModifiers() const {
422 return getModifiers().hasIntModifiers();
423 }
424
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000425 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000426
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000427 void addLiteralImmOperand(MCInst &Inst, int64_t Val) const;
428
429 void addKImmFP32Operands(MCInst &Inst, unsigned N) const;
430
431 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000432
433 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
434 if (isRegKind())
435 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000436 else if (isExpr())
437 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000438 else
439 addImmOperands(Inst, N);
440 }
441
442 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
443 Modifiers Mods = getModifiers();
444 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
445 if (isRegKind()) {
446 addRegOperands(Inst, N);
447 } else {
448 addImmOperands(Inst, N, false);
449 }
450 }
451
452 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
453 assert(!hasIntModifiers());
454 addRegOrImmWithInputModsOperands(Inst, N);
455 }
456
457 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
458 assert(!hasFPModifiers());
459 addRegOrImmWithInputModsOperands(Inst, N);
460 }
461
462 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
463 if (isImm())
464 addImmOperands(Inst, N);
465 else {
466 assert(isExpr());
467 Inst.addOperand(MCOperand::createExpr(Expr));
468 }
469 }
470
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000471 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000472 switch (Type) {
473 case ImmTyNone: OS << "None"; break;
474 case ImmTyGDS: OS << "GDS"; break;
475 case ImmTyOffen: OS << "Offen"; break;
476 case ImmTyIdxen: OS << "Idxen"; break;
477 case ImmTyAddr64: OS << "Addr64"; break;
478 case ImmTyOffset: OS << "Offset"; break;
479 case ImmTyOffset0: OS << "Offset0"; break;
480 case ImmTyOffset1: OS << "Offset1"; break;
481 case ImmTyGLC: OS << "GLC"; break;
482 case ImmTySLC: OS << "SLC"; break;
483 case ImmTyTFE: OS << "TFE"; break;
484 case ImmTyClampSI: OS << "ClampSI"; break;
485 case ImmTyOModSI: OS << "OModSI"; break;
486 case ImmTyDppCtrl: OS << "DppCtrl"; break;
487 case ImmTyDppRowMask: OS << "DppRowMask"; break;
488 case ImmTyDppBankMask: OS << "DppBankMask"; break;
489 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000490 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
491 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
492 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000493 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
494 case ImmTyDMask: OS << "DMask"; break;
495 case ImmTyUNorm: OS << "UNorm"; break;
496 case ImmTyDA: OS << "DA"; break;
497 case ImmTyR128: OS << "R128"; break;
498 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000499 case ImmTyOff: OS << "Off"; break;
500 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000501 case ImmTyExpCompr: OS << "ExpCompr"; break;
502 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000503 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000504 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000505 }
506 }
507
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000508 void print(raw_ostream &OS) const override {
509 switch (Kind) {
510 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000511 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000512 break;
513 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000514 OS << '<' << getImm();
515 if (getImmTy() != ImmTyNone) {
516 OS << " type: "; printImmTy(OS, getImmTy());
517 }
Sam Kolton945231a2016-06-10 09:57:59 +0000518 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000519 break;
520 case Token:
521 OS << '\'' << getToken() << '\'';
522 break;
523 case Expression:
524 OS << "<expr " << *Expr << '>';
525 break;
526 }
527 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000528
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000529 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
530 int64_t Val, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000531 enum ImmTy Type = ImmTyNone,
532 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000533 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000534 Op->Imm.Val = Val;
535 Op->Imm.IsFPImm = IsFPImm;
536 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000537 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000538 Op->StartLoc = Loc;
539 Op->EndLoc = Loc;
540 return Op;
541 }
542
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000543 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
544 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000545 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000546 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000547 Res->Tok.Data = Str.data();
548 Res->Tok.Length = Str.size();
549 Res->StartLoc = Loc;
550 Res->EndLoc = Loc;
551 return Res;
552 }
553
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000554 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
555 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000556 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000557 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000558 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000559 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000560 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000561 Op->Reg.IsForcedVOP3 = ForceVOP3;
562 Op->StartLoc = S;
563 Op->EndLoc = E;
564 return Op;
565 }
566
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000567 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
568 const class MCExpr *Expr, SMLoc S) {
569 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000570 Op->Expr = Expr;
571 Op->StartLoc = S;
572 Op->EndLoc = S;
573 return Op;
574 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000575};
576
Sam Kolton945231a2016-06-10 09:57:59 +0000577raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
578 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
579 return OS;
580}
581
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000582//===----------------------------------------------------------------------===//
583// AsmParser
584//===----------------------------------------------------------------------===//
585
Tom Stellard45bb48e2015-06-13 03:28:10 +0000586class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000587 const MCInstrInfo &MII;
588 MCAsmParser &Parser;
589
590 unsigned ForcedEncodingSize;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000591 bool ForcedDPP;
592 bool ForcedSDWA;
Matt Arsenault68802d32015-11-05 03:11:27 +0000593
Tom Stellard45bb48e2015-06-13 03:28:10 +0000594 /// @name Auto-generated Match Functions
595 /// {
596
597#define GET_ASSEMBLER_HEADER
598#include "AMDGPUGenAsmMatcher.inc"
599
600 /// }
601
Tom Stellard347ac792015-06-26 21:15:07 +0000602private:
603 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
604 bool ParseDirectiveHSACodeObjectVersion();
605 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000606 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
607 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000608 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000609 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000610 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000611 bool ParseDirectiveAMDGPUHsaModuleGlobal();
612 bool ParseDirectiveAMDGPUHsaProgramGlobal();
613 bool ParseSectionDirectiveHSADataGlobalAgent();
614 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000615 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000616 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
617 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000618 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000619
Tom Stellard45bb48e2015-06-13 03:28:10 +0000620public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000621 enum AMDGPUMatchResultTy {
622 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
623 };
624
  /// Construct the AMDGPU assembly parser.
  /// Falls back to a SOUTHERN_ISLANDS subtarget when no feature bits were
  /// supplied, computes the matcher's available-feature mask, and publishes
  /// the ISA version as pre-defined assembler symbols
  /// (.option.machine_version_{major,minor,stepping}).
  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0),
        ForcedDPP(false),
        ForcedSDWA(false) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    // Must run after any feature changes above so the matcher sees them.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
    }
  }
656
  // Subtarget-generation predicates, delegating to AMDGPUBaseInfo.
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  // True if the subtarget accepts the 1/(2*pi) inline immediate.
  bool hasInv2PiInlineImm() const {
    return getSTI().getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  // SGPR102/SGPR103 are usable on every generation except VI.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }
676
Tom Stellard347ac792015-06-26 21:15:07 +0000677 AMDGPUTargetStreamer &getTargetStreamer() {
678 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
679 return static_cast<AMDGPUTargetStreamer &>(TS);
680 }
Matt Arsenault37fefd62016-06-10 02:18:02 +0000681
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000682 const MCRegisterInfo *getMRI() const {
683 // We need this const_cast because for some reason getContext() is not const
684 // in MCAsmParser.
685 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
686 }
687
  // Accessor for the instruction info this parser was constructed with.
  const MCInstrInfo *getMII() const {
    return &MII;
  }
691
  // Record an encoding variant forced by the caller (e.g. from a mnemonic
  // suffix); consulted via the isForced* predicates below.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
Tom Stellard347ac792015-06-26 21:15:07 +0000695
  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  // A forced 64-bit encoding means the VOP3 form of the instruction.
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000700
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000701 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000702 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
703 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +0000704 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
705 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000706 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
707 OperandVector &Operands, MCStreamer &Out,
708 uint64_t &ErrorInfo,
709 bool MatchingInlineAsm) override;
710 bool ParseDirective(AsmToken DirectiveID) override;
711 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000712 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000713 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
714 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000715 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000716
Sam Kolton11de3702016-05-24 12:38:33 +0000717 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000718 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
719 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000720 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000721 bool (*ConvertResult)(int64_t&) = 0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000722 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +0000723 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000724 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000725
Sam Kolton1bdcef72016-05-23 09:59:02 +0000726 OperandMatchResultTy parseImm(OperandVector &Operands);
727 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000728 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
729 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000730 OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +0000731
Tom Stellard45bb48e2015-06-13 03:28:10 +0000732 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
733 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000734 void cvtExp(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000735
736 bool parseCnt(int64_t &IntVal);
737 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000738 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000739
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000740private:
741 struct OperandInfoTy {
742 int64_t Id;
743 bool IsSymbolic;
744 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
745 };
Sam Kolton11de3702016-05-24 12:38:33 +0000746
Artem Tamazov6edc1352016-05-26 17:00:33 +0000747 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
748 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000749
750 void errorExpTgt();
751 OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
752
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000753public:
Sam Kolton11de3702016-05-24 12:38:33 +0000754 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
755
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000756 OperandMatchResultTy parseExpTgt(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000757 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000758 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
759
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000760 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
761 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
762 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000763 AMDGPUOperand::Ptr defaultGLC() const;
764 AMDGPUOperand::Ptr defaultSLC() const;
765 AMDGPUOperand::Ptr defaultTFE() const;
766
Sam Kolton5f10a132016-05-06 11:31:17 +0000767 AMDGPUOperand::Ptr defaultDMask() const;
768 AMDGPUOperand::Ptr defaultUNorm() const;
769 AMDGPUOperand::Ptr defaultDA() const;
770 AMDGPUOperand::Ptr defaultR128() const;
771 AMDGPUOperand::Ptr defaultLWE() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000772 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
773 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
Sam Kolton5f10a132016-05-06 11:31:17 +0000774 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000775 AMDGPUOperand::Ptr defaultExpTgt() const;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000776 AMDGPUOperand::Ptr defaultExpCompr() const;
777 AMDGPUOperand::Ptr defaultExpVM() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +0000778
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000779 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
780
Tom Stellarda90b9522016-02-11 03:28:15 +0000781 void cvtId(MCInst &Inst, const OperandVector &Operands);
782 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000783 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000784
785 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000786 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000787
Sam Kolton11de3702016-05-24 12:38:33 +0000788 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000789 AMDGPUOperand::Ptr defaultRowMask() const;
790 AMDGPUOperand::Ptr defaultBankMask() const;
791 AMDGPUOperand::Ptr defaultBoundCtrl() const;
792 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000793
Sam Kolton05ef1c92016-06-03 10:27:37 +0000794 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
795 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000796 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000797 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
798 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +0000799 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
800 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
801 uint64_t BasicInstType);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000802};
803
// Table entry describing one optional instruction operand: its assembly
// name, the immediate kind it produces, and how its value is parsed.
struct OptionalOperand {
  const char *Name;                 // Assembly-syntax name/prefix of the operand.
  AMDGPUOperand::ImmTy Type;        // Immediate kind the parsed value is tagged with.
  bool IsBit;                       // Presumably: operand is a single-bit flag (cf. parseNamedBit) — confirm at use site.
  bool (*ConvertResult)(int64_t&);  // Optional in-place conversion of the parsed value; may be null (cf. parseIntWithPrefix).
};
810
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000811// May be called with integer type with equivalent bitwidth.
812static const fltSemantics *getFltSemantics(MVT VT) {
813 switch (VT.getSizeInBits()) {
814 case 32:
815 return &APFloat::IEEEsingle;
816 case 64:
817 return &APFloat::IEEEdouble;
818 case 16:
819 return &APFloat::IEEEhalf;
820 default:
821 llvm_unreachable("unsupported fp type");
822 }
823}
824
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000825}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000826
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000827//===----------------------------------------------------------------------===//
828// Operand
829//===----------------------------------------------------------------------===//
830
// Returns true if FPLiteral (a double-precision value) can be converted to
// the FP format of VT without overflow or underflow. Precision loss alone is
// tolerated. Note: FPLiteral is converted in place as a side effect.
static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
  bool Lost;

  // Convert literal to single precision
  APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
                                               APFloat::rmNearestTiesToEven,
                                               &Lost);
  // We allow precision lost but not overflow or underflow
  if (Status != APFloat::opOK &&
      Lost &&
      ((Status & APFloat::opOverflow)  != 0 ||
       (Status & APFloat::opUnderflow) != 0)) {
    return false;
  }

  return true;
}
848
// Returns true if this immediate can be encoded as a hardware inline
// constant for an operand of machine-value type 'type', taking the
// subtarget's 1/(2*pi) inline-constant support into account.
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      // The token already holds a 64-bit double bit pattern; test it directly.
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operand: the double must first convert to the operand's FP
    // format without overflow/underflow before the inline test applies.
    APFloat FPLiteral(APFloat::IEEEdouble, APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }


  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // 32-bit (or smaller) operand: only the low 32 bits participate.
  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
887
// Returns true if this immediate may be encoded as an extra literal dword
// (as opposed to an inline constant) for an operand of type 'type'.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this imediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  if (!Imm.IsFPImm) {
    // We got int literal token.

    // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
    // types.
    // Accept anything representable in 32 bits, signed or unsigned.
    return isUInt<32>(Imm.Val) || isInt<32>(Imm.Val);
  }

  // We got fp literal token
  if (type == MVT::f64) { // Expected 64-bit fp operand
    // We would set low 64-bits of literal to zeroes but we accept this literals
    return true;
  }

  if (type == MVT::i64) { // Expected 64-bit int operand
    // We don't allow fp literals in 64-bit integer instructions. It is
    // unclear how we should encode them.
    return false;
  }

  // Narrower FP operand: literal is usable only if the double converts to
  // the operand's format without overflow/underflow.
  APFloat FPLiteral(APFloat::IEEEdouble, APInt(64, Imm.Val));
  return canLosslesslyConvertToFPType(FPLiteral, type);
}
917
918bool AMDGPUOperand::isRegClass(unsigned RCID) const {
919 return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
920}
921
// Append this immediate to Inst. If ApplyModifiers is set, a pending FP
// negate modifier is folded into the value first. Values destined for SI
// source operands go through literal/inline-constant encoding.
void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
  int64_t Val = Imm.Val;
  if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers() && Imm.Mods.Neg) {
    // Apply modifiers to immediate value. Only negate can get here
    if (Imm.IsFPImm) {
      // Val holds a double bit pattern; negate it as a float, not an int.
      APFloat F(BitsToDouble(Val));
      F.changeSign();
      Val = F.bitcastToAPInt().getZExtValue();
    } else {
      Val = -Val;
    }
  }

  if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()), Inst.getNumOperands())) {
    // Source operands may need inline-constant or literal encoding.
    addLiteralImmOperand(Inst, Val);
  } else {
    Inst.addOperand(MCOperand::createImm(Val));
  }
}
941
// Encode Val for an SI source operand of Inst: emit it directly when it is
// an inline constant, otherwise emit the (possibly converted/truncated)
// 32-bit literal. Assumes isLiteralImm/isInlinableImm checks passed earlier.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  APInt Literal(64, Val);
  auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), InstDesc, OpNum); // expected operand size

  if (Imm.IsFPImm) { // We got fp literal token
    if (OpSize == 8) { // Expected 64-bit operand
      // Check if literal is inlinable
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
      } else if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit"
          " floating-point operand. Low 32-bits will be"
          " set to zero");
        }
        // Hardware extends the 32-bit literal with zero low bits; emit the
        // high half of the double.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
      } else {
        // We don't allow fp literals in 64-bit integer instructions. It is
        // unclear how we should encode them. This case should be checked earlier
        // in predicate methods (isLiteralImm())
        llvm_unreachable("fp literal in 64-bit integer instruction.");
      }
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()
      Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
    }
  } else { // We got int literal token
    if (OpSize == 8) { // Expected 64-bit operand
      auto LiteralVal = Literal.getZExtValue();
      if (AMDGPU::isInlinableLiteral64(LiteralVal,
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    } else { // Expected 32-bit operand
      auto LiteralVal = static_cast<int32_t>(Literal.getLoBits(32).getZExtValue());
      if (AMDGPU::isInlinableLiteral32(LiteralVal,
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    }
    // Not inlinable: fall through to a plain 32-bit literal in either case.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
  }
}
1000
// Append this immediate as a 32-bit KImm operand: FP tokens are converted
// from double to single precision, integer tokens are truncated to 32 bits.
// Note: N (the operand index) is unused in this implementation; it appears
// to exist only to satisfy the addOperands interface — confirm at call sites.
void AMDGPUOperand::addKImmFP32Operands(MCInst &Inst, unsigned N) const {
  APInt Literal(64, Imm.Val);
  if (Imm.IsFPImm) { // We got fp literal
    bool lost;
    APFloat FPLiteral(APFloat::IEEEdouble, Literal);
    // Precision loss is accepted here; only the single-precision bits are encoded.
    FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
    Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
  } else { // We got int literal token
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
  }
}
1012
1013void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1014 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1015}
1016
1017//===----------------------------------------------------------------------===//
1018// AsmParser
1019//===----------------------------------------------------------------------===//
1020
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001021static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1022 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001023 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001024 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001025 case 1: return AMDGPU::VGPR_32RegClassID;
1026 case 2: return AMDGPU::VReg_64RegClassID;
1027 case 3: return AMDGPU::VReg_96RegClassID;
1028 case 4: return AMDGPU::VReg_128RegClassID;
1029 case 8: return AMDGPU::VReg_256RegClassID;
1030 case 16: return AMDGPU::VReg_512RegClassID;
1031 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001032 } else if (Is == IS_TTMP) {
1033 switch (RegWidth) {
1034 default: return -1;
1035 case 1: return AMDGPU::TTMP_32RegClassID;
1036 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001037 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001038 }
1039 } else if (Is == IS_SGPR) {
1040 switch (RegWidth) {
1041 default: return -1;
1042 case 1: return AMDGPU::SGPR_32RegClassID;
1043 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001044 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001045 case 8: return AMDGPU::SReg_256RegClassID;
1046 case 16: return AMDGPU::SReg_512RegClassID;
1047 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001048 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001049 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001050}
1051
// Map a special-register assembly name to its register number, or 0 when
// the name is not a recognized special register.
static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Default(0);
}
1073
1074bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001075 auto R = parseRegister();
1076 if (!R) return true;
1077 assert(R->isReg());
1078 RegNo = R->getReg();
1079 StartLoc = R->getStartLoc();
1080 EndLoc = R->getEndLoc();
1081 return false;
1082}
1083
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001084bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
1085{
1086 switch (RegKind) {
1087 case IS_SPECIAL:
1088 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
1089 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
1090 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
1091 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
1092 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
1093 return false;
1094 case IS_VGPR:
1095 case IS_SGPR:
1096 case IS_TTMP:
1097 if (Reg1 != Reg + RegWidth) { return false; }
1098 RegWidth++;
1099 return true;
1100 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001101 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001102 }
1103}
1104
// Parse one register expression and fill in its kind, MC register number,
// raw register index and width (in dwords). Accepted forms:
//   special names ("vcc", "exec", ...), vNN / sNN / ttmpNN,
//   ranges v[LO:HI] / v[LO], and lists of consecutive registers [s0,s1,...].
// Returns false on any syntax or validation failure.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Decode the kind prefix ('v', 's' or "ttmp"); RegNumIndex is where
      // the numeric part starts.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    // Parse the first element; each element must itself be a single
    // (width-1) register.
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) {
          return false;
        }
        // All elements must share a kind and be consecutive.
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Validate the parsed (kind, index, width) and resolve the MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    // Convert the raw dword index into an index within the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Reject registers that don't exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1230
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001231std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001232 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001233 SMLoc StartLoc = Tok.getLoc();
1234 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001235 RegisterKind RegKind;
1236 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001237
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001238 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
1239 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001240 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001241 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001242}
1243
// Parse an (optionally negated) integer or floating-point literal into an
// immediate operand. Returns NoMatch only when no leading '-' was consumed
// and the token is neither Integer nor Real.
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  // TODO: add syntactic sugar for 1/(2*PI)
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    // For a Real token IntVal carries the raw bit pattern of a double
    // (see BitsToDouble below); negate via APFloat to flip the sign bit.
    APFloat F(BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    // A consumed '-' with no literal after it is a hard parse failure.
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}
1281
Alex Bradbury58eba092016-11-01 16:32:05 +00001282OperandMatchResultTy
Sam Kolton1bdcef72016-05-23 09:59:02 +00001283AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1284 auto res = parseImm(Operands);
1285 if (res != MatchOperand_NoMatch) {
1286 return res;
1287 }
1288
1289 if (auto R = parseRegister()) {
1290 assert(R->isReg());
1291 R->Reg.IsForcedVOP3 = isForcedVOP3();
1292 Operands.push_back(std::move(R));
1293 return MatchOperand_Success;
1294 }
1295 return MatchOperand_ParseFail;
1296}
1297
// Parse a source operand with optional floating-point input modifiers:
// a leading '-' (neg) and either "abs(...)" or "|...|" (abs). The closing
// delimiter of the chosen abs form is required after the operand.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(" spelling of the abs modifier.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // "|...|" spelling; mutually exclusive with "abs(...)".
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Negate) {
    Mods.Neg = true;
  }
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  // Attach modifiers to the operand just parsed by parseRegOrImm.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1361
// Parse a source operand with the optional integer input modifier
// "sext(...)"; the closing ')' is required after the operand when the
// modifier was used.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  bool Sext = false;

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Sext) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  // Attach the modifier to the operand just parsed by parseRegOrImm.
  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001398
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001399OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1400 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1401 if (Reg) {
1402 Operands.push_back(std::move(Reg));
1403 return MatchOperand_Success;
1404 }
1405
1406 const AsmToken &Tok = Parser.getTok();
1407 if (Tok.getString() == "off") {
1408 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1409 AMDGPUOperand::ImmTyOff, false));
1410 Parser.Lex();
1411 return MatchOperand_Success;
1412 }
1413
1414 return MatchOperand_NoMatch;
1415}
1416
// Post-match validation hook: reject instructions whose encoding doesn't
// agree with a mnemonic-suffix-forced encoding (_e32/_e64/DPP/SDWA), prefer
// e32 where the instruction asks for it, and enforce SDWA restrictions of
// v_mac. Returns a Match_* code.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // A forced encoding must agree with the matched instruction's flags.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Without an explicit _e64 suffix, prefer the 32-bit encoding when the
  // instruction is flagged that way.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}
1445
Tom Stellard45bb48e2015-06-13 03:28:10 +00001446bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
1447 OperandVector &Operands,
1448 MCStreamer &Out,
1449 uint64_t &ErrorInfo,
1450 bool MatchingInlineAsm) {
Sam Koltond63d8a72016-09-09 09:37:51 +00001451 // What asm variants we should check
1452 std::vector<unsigned> MatchedVariants;
1453 if (getForcedEncodingSize() == 32) {
1454 MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
1455 } else if (isForcedVOP3()) {
1456 MatchedVariants = {AMDGPUAsmVariants::VOP3};
1457 } else if (isForcedSDWA()) {
1458 MatchedVariants = {AMDGPUAsmVariants::SDWA};
1459 } else if (isForcedDPP()) {
1460 MatchedVariants = {AMDGPUAsmVariants::DPP};
1461 } else {
1462 MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
1463 AMDGPUAsmVariants::VOP3,
1464 AMDGPUAsmVariants::SDWA,
1465 AMDGPUAsmVariants::DPP};
1466 }
1467
Tom Stellard45bb48e2015-06-13 03:28:10 +00001468 MCInst Inst;
Sam Koltond63d8a72016-09-09 09:37:51 +00001469 unsigned Result = Match_Success;
1470 for (auto Variant : MatchedVariants) {
1471 uint64_t EI;
1472 auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
1473 Variant);
1474 // We order match statuses from least to most specific. We use most specific
1475 // status as resulting
1476 // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
1477 if ((R == Match_Success) ||
1478 (R == Match_PreferE32) ||
1479 (R == Match_MissingFeature && Result != Match_PreferE32) ||
1480 (R == Match_InvalidOperand && Result != Match_MissingFeature
1481 && Result != Match_PreferE32) ||
1482 (R == Match_MnemonicFail && Result != Match_InvalidOperand
1483 && Result != Match_MissingFeature
1484 && Result != Match_PreferE32)) {
1485 Result = R;
1486 ErrorInfo = EI;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001487 }
Sam Koltond63d8a72016-09-09 09:37:51 +00001488 if (R == Match_Success)
1489 break;
1490 }
1491
1492 switch (Result) {
1493 default: break;
1494 case Match_Success:
1495 Inst.setLoc(IDLoc);
1496 Out.EmitInstruction(Inst, getSTI());
1497 return false;
1498
1499 case Match_MissingFeature:
1500 return Error(IDLoc, "instruction not supported on this GPU");
1501
1502 case Match_MnemonicFail:
1503 return Error(IDLoc, "unrecognized instruction mnemonic");
1504
1505 case Match_InvalidOperand: {
1506 SMLoc ErrorLoc = IDLoc;
1507 if (ErrorInfo != ~0ULL) {
1508 if (ErrorInfo >= Operands.size()) {
1509 return Error(IDLoc, "too few operands for instruction");
1510 }
1511 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
1512 if (ErrorLoc == SMLoc())
1513 ErrorLoc = IDLoc;
1514 }
1515 return Error(ErrorLoc, "invalid operand for instruction");
1516 }
1517
1518 case Match_PreferE32:
1519 return Error(IDLoc, "internal error: instruction without _e64 suffix "
1520 "should be encoded as e32");
Tom Stellard45bb48e2015-06-13 03:28:10 +00001521 }
1522 llvm_unreachable("Implement any new match types added!");
1523}
1524
Tom Stellard347ac792015-06-26 21:15:07 +00001525bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1526 uint32_t &Minor) {
1527 if (getLexer().isNot(AsmToken::Integer))
1528 return TokError("invalid major version");
1529
1530 Major = getLexer().getTok().getIntVal();
1531 Lex();
1532
1533 if (getLexer().isNot(AsmToken::Comma))
1534 return TokError("minor version number required, comma expected");
1535 Lex();
1536
1537 if (getLexer().isNot(AsmToken::Integer))
1538 return TokError("invalid minor version");
1539
1540 Minor = getLexer().getTok().getIntVal();
1541 Lex();
1542
1543 return false;
1544}
1545
1546bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1547
1548 uint32_t Major;
1549 uint32_t Minor;
1550
1551 if (ParseDirectiveMajorMinor(Major, Minor))
1552 return true;
1553
1554 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1555 return false;
1556}
1557
1558bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
1559
1560 uint32_t Major;
1561 uint32_t Minor;
1562 uint32_t Stepping;
1563 StringRef VendorName;
1564 StringRef ArchName;
1565
1566 // If this directive has no arguments, then use the ISA version for the
1567 // targeted GPU.
1568 if (getLexer().is(AsmToken::EndOfStatement)) {
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001569 AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
Tom Stellard347ac792015-06-26 21:15:07 +00001570 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
1571 Isa.Stepping,
1572 "AMD", "AMDGPU");
1573 return false;
1574 }
1575
1576
1577 if (ParseDirectiveMajorMinor(Major, Minor))
1578 return true;
1579
1580 if (getLexer().isNot(AsmToken::Comma))
1581 return TokError("stepping version number required, comma expected");
1582 Lex();
1583
1584 if (getLexer().isNot(AsmToken::Integer))
1585 return TokError("invalid stepping version");
1586
1587 Stepping = getLexer().getTok().getIntVal();
1588 Lex();
1589
1590 if (getLexer().isNot(AsmToken::Comma))
1591 return TokError("vendor name required, comma expected");
1592 Lex();
1593
1594 if (getLexer().isNot(AsmToken::String))
1595 return TokError("invalid vendor name");
1596
1597 VendorName = getLexer().getTok().getStringContents();
1598 Lex();
1599
1600 if (getLexer().isNot(AsmToken::Comma))
1601 return TokError("arch name required, comma expected");
1602 Lex();
1603
1604 if (getLexer().isNot(AsmToken::String))
1605 return TokError("invalid arch name");
1606
1607 ArchName = getLexer().getTok().getStringContents();
1608 Lex();
1609
1610 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
1611 VendorName, ArchName);
1612 return false;
1613}
1614
Tom Stellardff7416b2015-06-26 21:58:31 +00001615bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1616 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001617 SmallString<40> ErrStr;
1618 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00001619 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001620 return TokError(Err.str());
1621 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001622 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001623 return false;
1624}
1625
1626bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
1627
1628 amd_kernel_code_t Header;
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001629 AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +00001630
1631 while (true) {
1632
Tom Stellardff7416b2015-06-26 21:58:31 +00001633 // Lex EndOfStatement. This is in a while loop, because lexing a comment
1634 // will set the current token to EndOfStatement.
1635 while(getLexer().is(AsmToken::EndOfStatement))
1636 Lex();
1637
1638 if (getLexer().isNot(AsmToken::Identifier))
1639 return TokError("expected value identifier or .end_amd_kernel_code_t");
1640
1641 StringRef ID = getLexer().getTok().getIdentifier();
1642 Lex();
1643
1644 if (ID == ".end_amd_kernel_code_t")
1645 break;
1646
1647 if (ParseAMDKernelCodeTValue(ID, Header))
1648 return true;
1649 }
1650
1651 getTargetStreamer().EmitAMDKernelCodeT(Header);
1652
1653 return false;
1654}
1655
Tom Stellarde135ffd2015-09-25 21:41:28 +00001656bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1657 getParser().getStreamer().SwitchSection(
1658 AMDGPU::getHSATextSection(getContext()));
1659 return false;
1660}
1661
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001662bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1663 if (getLexer().isNot(AsmToken::Identifier))
1664 return TokError("expected symbol name");
1665
1666 StringRef KernelName = Parser.getTok().getString();
1667
1668 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1669 ELF::STT_AMDGPU_HSA_KERNEL);
1670 Lex();
1671 return false;
1672}
1673
Tom Stellard00f2f912015-12-02 19:47:57 +00001674bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1675 if (getLexer().isNot(AsmToken::Identifier))
1676 return TokError("expected symbol name");
1677
1678 StringRef GlobalName = Parser.getTok().getIdentifier();
1679
1680 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1681 Lex();
1682 return false;
1683}
1684
1685bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1686 if (getLexer().isNot(AsmToken::Identifier))
1687 return TokError("expected symbol name");
1688
1689 StringRef GlobalName = Parser.getTok().getIdentifier();
1690
1691 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1692 Lex();
1693 return false;
1694}
1695
1696bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1697 getParser().getStreamer().SwitchSection(
1698 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1699 return false;
1700}
1701
1702bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1703 getParser().getStreamer().SwitchSection(
1704 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1705 return false;
1706}
1707
Tom Stellard9760f032015-12-03 03:34:32 +00001708bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1709 getParser().getStreamer().SwitchSection(
1710 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1711 return false;
1712}
1713
Tom Stellard45bb48e2015-06-13 03:28:10 +00001714bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001715 StringRef IDVal = DirectiveID.getString();
1716
1717 if (IDVal == ".hsa_code_object_version")
1718 return ParseDirectiveHSACodeObjectVersion();
1719
1720 if (IDVal == ".hsa_code_object_isa")
1721 return ParseDirectiveHSACodeObjectISA();
1722
Tom Stellardff7416b2015-06-26 21:58:31 +00001723 if (IDVal == ".amd_kernel_code_t")
1724 return ParseDirectiveAMDKernelCodeT();
1725
Tom Stellardfcfaea42016-05-05 17:03:33 +00001726 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001727 return ParseSectionDirectiveHSAText();
1728
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001729 if (IDVal == ".amdgpu_hsa_kernel")
1730 return ParseDirectiveAMDGPUHsaKernel();
1731
Tom Stellard00f2f912015-12-02 19:47:57 +00001732 if (IDVal == ".amdgpu_hsa_module_global")
1733 return ParseDirectiveAMDGPUHsaModuleGlobal();
1734
1735 if (IDVal == ".amdgpu_hsa_program_global")
1736 return ParseDirectiveAMDGPUHsaProgramGlobal();
1737
1738 if (IDVal == ".hsadata_global_agent")
1739 return ParseSectionDirectiveHSADataGlobalAgent();
1740
1741 if (IDVal == ".hsadata_global_program")
1742 return ParseSectionDirectiveHSADataGlobalProgram();
1743
Tom Stellard9760f032015-12-03 03:34:32 +00001744 if (IDVal == ".hsarodata_readonly_agent")
1745 return ParseSectionDirectiveHSARodataReadonlyAgent();
1746
Tom Stellard45bb48e2015-06-13 03:28:10 +00001747 return true;
1748}
1749
Matt Arsenault68802d32015-11-05 03:11:27 +00001750bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1751 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001752 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001753 return true;
1754
Matt Arsenault3b159672015-12-01 20:31:08 +00001755 if (isSI()) {
1756 // No flat_scr
1757 switch (RegNo) {
1758 case AMDGPU::FLAT_SCR:
1759 case AMDGPU::FLAT_SCR_LO:
1760 case AMDGPU::FLAT_SCR_HI:
1761 return false;
1762 default:
1763 return true;
1764 }
1765 }
1766
Matt Arsenault68802d32015-11-05 03:11:27 +00001767 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1768 // SI/CI have.
1769 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1770 R.isValid(); ++R) {
1771 if (*R == RegNo)
1772 return false;
1773 }
1774
1775 return true;
1776}
1777
// Parse one instruction operand: first via the tablegen'd custom parsers,
// then as a generic register/immediate, finally as an expression or a bare
// token.
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Generic register or immediate operand.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treat the symbol name as the token
    // string when we want to interpret the operand as a token.
    // NOTE(review): Tok is a reference to the lexer's current token; the
    // fallback below assumes it still designates the identifier when
    // parseExpression fails without consuming it — confirm before reordering.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: keep the raw identifier as a token operand.
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1819
Sam Kolton05ef1c92016-06-03 10:27:37 +00001820StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1821 // Clear any forced encodings from the previous instruction.
1822 setForcedEncodingSize(0);
1823 setForcedDPP(false);
1824 setForcedSDWA(false);
1825
1826 if (Name.endswith("_e64")) {
1827 setForcedEncodingSize(64);
1828 return Name.substr(0, Name.size() - 4);
1829 } else if (Name.endswith("_e32")) {
1830 setForcedEncodingSize(32);
1831 return Name.substr(0, Name.size() - 4);
1832 } else if (Name.endswith("_dpp")) {
1833 setForcedDPP(true);
1834 return Name.substr(0, Name.size() - 4);
1835 } else if (Name.endswith("_sdwa")) {
1836 setForcedSDWA(true);
1837 return Name.substr(0, Name.size() - 5);
1838 }
1839 return Name;
1840}
1841
Tom Stellard45bb48e2015-06-13 03:28:10 +00001842bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1843 StringRef Name,
1844 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001845 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00001846 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001847 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00001848
Tom Stellard45bb48e2015-06-13 03:28:10 +00001849 while (!getLexer().is(AsmToken::EndOfStatement)) {
Alex Bradbury58eba092016-11-01 16:32:05 +00001850 OperandMatchResultTy Res = parseOperand(Operands, Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001851
1852 // Eat the comma or space if there is one.
1853 if (getLexer().is(AsmToken::Comma))
1854 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00001855
Tom Stellard45bb48e2015-06-13 03:28:10 +00001856 switch (Res) {
1857 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001858 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001859 Error(getLexer().getLoc(), "failed parsing operand.");
1860 while (!getLexer().is(AsmToken::EndOfStatement)) {
1861 Parser.Lex();
1862 }
1863 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001864 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001865 Error(getLexer().getLoc(), "not a valid operand.");
1866 while (!getLexer().is(AsmToken::EndOfStatement)) {
1867 Parser.Lex();
1868 }
1869 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001870 }
1871 }
1872
Tom Stellard45bb48e2015-06-13 03:28:10 +00001873 return false;
1874}
1875
1876//===----------------------------------------------------------------------===//
1877// Utility functions
1878//===----------------------------------------------------------------------===//
1879
Alex Bradbury58eba092016-11-01 16:32:05 +00001880OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001881AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001882 switch(getLexer().getKind()) {
1883 default: return MatchOperand_NoMatch;
1884 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001885 StringRef Name = Parser.getTok().getString();
1886 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001887 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001888 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001889
1890 Parser.Lex();
1891 if (getLexer().isNot(AsmToken::Colon))
1892 return MatchOperand_ParseFail;
1893
1894 Parser.Lex();
1895 if (getLexer().isNot(AsmToken::Integer))
1896 return MatchOperand_ParseFail;
1897
1898 if (getParser().parseAbsoluteExpression(Int))
1899 return MatchOperand_ParseFail;
1900 break;
1901 }
1902 }
1903 return MatchOperand_Success;
1904}
1905
Alex Bradbury58eba092016-11-01 16:32:05 +00001906OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00001907AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001908 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001909 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001910 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001911 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001912
Alex Bradbury58eba092016-11-01 16:32:05 +00001913 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001914 if (Res != MatchOperand_Success)
1915 return Res;
1916
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001917 if (ConvertResult && !ConvertResult(Value)) {
1918 return MatchOperand_ParseFail;
1919 }
1920
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001921 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001922 return MatchOperand_Success;
1923}
1924
Alex Bradbury58eba092016-11-01 16:32:05 +00001925OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00001926AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001927 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001928 int64_t Bit = 0;
1929 SMLoc S = Parser.getTok().getLoc();
1930
1931 // We are at the end of the statement, and this is a default argument, so
1932 // use a default value.
1933 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1934 switch(getLexer().getKind()) {
1935 case AsmToken::Identifier: {
1936 StringRef Tok = Parser.getTok().getString();
1937 if (Tok == Name) {
1938 Bit = 1;
1939 Parser.Lex();
1940 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1941 Bit = 0;
1942 Parser.Lex();
1943 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001944 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001945 }
1946 break;
1947 }
1948 default:
1949 return MatchOperand_NoMatch;
1950 }
1951 }
1952
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001953 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001954 return MatchOperand_Success;
1955}
1956
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001957typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1958
Sam Koltona74cd522016-03-18 15:35:51 +00001959void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1960 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001961 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001962 auto i = OptionalIdx.find(ImmT);
1963 if (i != OptionalIdx.end()) {
1964 unsigned Idx = i->second;
1965 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1966 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001967 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001968 }
1969}
1970
Alex Bradbury58eba092016-11-01 16:32:05 +00001971OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00001972AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00001973 if (getLexer().isNot(AsmToken::Identifier)) {
1974 return MatchOperand_NoMatch;
1975 }
1976 StringRef Tok = Parser.getTok().getString();
1977 if (Tok != Prefix) {
1978 return MatchOperand_NoMatch;
1979 }
1980
1981 Parser.Lex();
1982 if (getLexer().isNot(AsmToken::Colon)) {
1983 return MatchOperand_ParseFail;
1984 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001985
Sam Kolton3025e7f2016-04-26 13:33:56 +00001986 Parser.Lex();
1987 if (getLexer().isNot(AsmToken::Identifier)) {
1988 return MatchOperand_ParseFail;
1989 }
1990
1991 Value = Parser.getTok().getString();
1992 return MatchOperand_Success;
1993}
1994
Tom Stellard45bb48e2015-06-13 03:28:10 +00001995//===----------------------------------------------------------------------===//
1996// ds
1997//===----------------------------------------------------------------------===//
1998
Tom Stellard45bb48e2015-06-13 03:28:10 +00001999void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
2000 const OperandVector &Operands) {
2001
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002002 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002003
2004 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2005 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2006
2007 // Add the register arguments
2008 if (Op.isReg()) {
2009 Op.addRegOperands(Inst, 1);
2010 continue;
2011 }
2012
2013 // Handle optional arguments
2014 OptionalIdx[Op.getImmTy()] = i;
2015 }
2016
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002017 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
2018 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002019 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002020
Tom Stellard45bb48e2015-06-13 03:28:10 +00002021 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2022}
2023
2024void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
2025
2026 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
2027 bool GDSOnly = false;
2028
2029 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2030 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2031
2032 // Add the register arguments
2033 if (Op.isReg()) {
2034 Op.addRegOperands(Inst, 1);
2035 continue;
2036 }
2037
2038 if (Op.isToken() && Op.getToken() == "gds") {
2039 GDSOnly = true;
2040 continue;
2041 }
2042
2043 // Handle optional arguments
2044 OptionalIdx[Op.getImmTy()] = i;
2045 }
2046
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002047 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
2048 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002049
2050 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002051 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002052 }
2053 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2054}
2055
// Convert parsed exp-instruction operands into MC operands while computing
// the "en" enable mask: bit i is set for each source slot written by a real
// register; "off" slots advance the slot index without setting a bit.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned EnMask = 0;   // enable mask, one bit per source slot
  int SrcIdx = 0;        // current source slot

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      EnMask |= (1 << SrcIdx);
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // "off" consumes a source slot but contributes no enable bit; emit a
    // placeholder register operand.
    if (Op.isOff()) {
      ++SrcIdx;
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      continue;
    }

    // The export target immediate is emitted in place.
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // The "done" token contributes no explicit MC operand in this conversion.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002096
2097//===----------------------------------------------------------------------===//
2098// s_waitcnt
2099//===----------------------------------------------------------------------===//
2100
2101bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
2102 StringRef CntName = Parser.getTok().getString();
2103 int64_t CntVal;
2104
2105 Parser.Lex();
2106 if (getLexer().isNot(AsmToken::LParen))
2107 return true;
2108
2109 Parser.Lex();
2110 if (getLexer().isNot(AsmToken::Integer))
2111 return true;
2112
2113 if (getParser().parseAbsoluteExpression(CntVal))
2114 return true;
2115
2116 if (getLexer().isNot(AsmToken::RParen))
2117 return true;
2118
2119 Parser.Lex();
2120 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
2121 Parser.Lex();
2122
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +00002123 IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00002124 if (CntName == "vmcnt")
2125 IntVal = encodeVmcnt(IV, IntVal, CntVal);
2126 else if (CntName == "expcnt")
2127 IntVal = encodeExpcnt(IV, IntVal, CntVal);
2128 else if (CntName == "lgkmcnt")
2129 IntVal = encodeLgkmcnt(IV, IntVal, CntVal);
2130 else
Tom Stellard45bb48e2015-06-13 03:28:10 +00002131 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002132
Tom Stellard45bb48e2015-06-13 03:28:10 +00002133 return false;
2134}
2135
// Parse the operand of s_waitcnt: either a raw integer mask, or one or more
// named "<cnt>(<n>)" components folded into the default (all-ones) mask.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Start from the "wait for nothing" mask for this ISA; named components
  // overwrite their fields.
  IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
  int64_t Waitcnt = getWaitcntBitMask(IV);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // Named components; parseCnt consumes any '&'/',' separators.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
2160
// Parse "hwreg(<id>[, <offset>, <width>])". The register id may be a
// symbolic name (looked up in IdSymbolic) or an integer; offset/width are
// optional and must appear together. Returns true on syntax error; range
// checking of the parsed values is left to the caller.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: an unknown name leaves Id at ID_UNKNOWN_
    // (reported by the caller), not a parse failure here.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: "hwreg(<id>)" — offset/width keep their defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2221
// Parse the operand of s_getreg/s_setreg: either a raw 16-bit immediate or a
// "hwreg(...)" construct, packed as id | offset<<ID bits | (width-1)<<...
// Out-of-range fields report an error but still produce an operand so that
// parsing can continue with useful follow-on diagnostics.
OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;

  case AsmToken::Identifier: {
      // Structured "hwreg(id[, offset, width])" form; range-check each field.
      OperandInfoTy HwReg(ID_UNKNOWN_);
      int64_t Offset = OFFSET_DEFAULT_;
      int64_t Width = WIDTH_M1_DEFAULT_ + 1;
      if (parseHwregConstruct(HwReg, Offset, Width))
        return MatchOperand_ParseFail;
      if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
        if (HwReg.IsSymbolic)
          Error(S, "invalid symbolic name of hardware register");
        else
          Error(S, "invalid code of hardware register: only 6-bit values are legal");
      }
      if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
        Error(S, "invalid bit offset: only 5-bit values are legal");
      if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
        Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
      // Pack the three fields into the 16-bit simm16 encoding.
      Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
2265
Tom Stellard45bb48e2015-06-13 03:28:10 +00002266bool AMDGPUOperand::isSWaitCnt() const {
2267 return isImm();
2268}
2269
Artem Tamazovd6468662016-04-25 14:13:51 +00002270bool AMDGPUOperand::isHwreg() const {
2271 return isImmTy(ImmTyHwreg);
2272}
2273
/// Parse the sendmsg(<msg>[, <op>[, <stream>]]) construct into its raw
/// components.  Returns true on a hard syntax error; semantic validation of
/// the parsed ids (range checks, diagnostics) is left to the caller.
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name: look it up in IdSymbolic, skipping gaps in the
    // id space.  An unknown name leaves Msg.Id == ID_UNKNOWN_.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric message id.
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): if another integer immediately follows, it is re-parsed
    // into Msg.Id and the id reset to unknown on failure; presumably error
    // recovery for malformed input — confirm intent.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Only GS, GS_DONE and SYSMSG accept an operation; every other message
  // must be closed immediately.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic operation name; the valid name table and id range depend on
    // whether this is a SYSMSG or a GS-family message.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric operation id.
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
2369
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002370void AMDGPUAsmParser::errorExpTgt() {
2371 Error(Parser.getTok().getLoc(), "invalid exp target");
2372}
2373
/// Translate an exp target token into its hardware encoding:
///   mrt0..mrt7 -> 0..7, mrtz -> 8, null -> 9, pos0..pos3 -> 12..15,
///   param0..param31 -> 32..63.  "invalid_target_<n>" passes <n> through
/// verbatim.  Out-of-range values emit a diagnostic via errorExpTgt() but
/// still return MatchOperand_Success so the raw value is preserved.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 3)
      errorExpTgt();

    // pos targets start at encoding 12.
    Val += 12;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    // param targets start at encoding 32.
    Val += 32;
    return MatchOperand_Success;
  }

  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    // Always diagnosed, but the raw value is still accepted.
    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
2432
2433OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
2434 uint8_t Val;
2435 StringRef Str = Parser.getTok().getString();
2436
2437 auto Res = parseExpTgtImpl(Str, Val);
2438 if (Res != MatchOperand_Success)
2439 return Res;
2440
2441 SMLoc S = Parser.getTok().getLoc();
2442 Parser.Lex();
2443
2444 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
2445 AMDGPUOperand::ImmTyExpTgt));
2446 return MatchOperand_Success;
2447}
2448
/// Parse a sendmsg operand, either a bare 16-bit immediate or the
/// sendmsg(...) construct.  Semantic errors emit diagnostics but still push
/// an immediate operand so parsing can continue without cascading errors.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      // sendmsg(...) form: syntax handled by parseSendMsgConstruct, the
      // validation and encoding of the pieces happens below.  The do/while(0)
      // lets any validation failure break out after emitting its diagnostic.
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (0);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2527
2528bool AMDGPUOperand::isSendMsg() const {
2529 return isImmTy(ImmTySendMsg);
2530}
2531
Tom Stellard45bb48e2015-06-13 03:28:10 +00002532//===----------------------------------------------------------------------===//
2533// sopp branch targets
2534//===----------------------------------------------------------------------===//
2535
/// Parse an SOPP branch target: either an absolute integer expression
/// (pushed as an immediate) or a label (pushed as a symbol-ref expression).
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      // parseAbsoluteExpression consumes the tokens; no explicit Lex needed.
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      // Create (or reuse) a symbol for the label and wrap it in an MCExpr;
      // the identifier token must be consumed explicitly here.
      Operands.push_back(AMDGPUOperand::CreateExpr(this,
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
              Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
2558
2559//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002560// mubuf
2561//===----------------------------------------------------------------------===//
2562
Sam Kolton5f10a132016-05-06 11:31:17 +00002563AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002564 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00002565}
2566
2567AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002568 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00002569}
2570
2571AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002572 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
Sam Kolton5f10a132016-05-06 11:31:17 +00002573}
2574
/// Convert parsed MUBUF operands into an MCInst.  Register and plain
/// immediate (soffset) operands are appended in parse order; named modifier
/// immediates are collected into OptionalIdx and appended afterwards in the
/// fixed order offset, glc (non-atomic only), slc, tfe.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true); // RTN implies atomic.

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2620
2621//===----------------------------------------------------------------------===//
2622// mimg
2623//===----------------------------------------------------------------------===//
2624
/// Convert parsed MIMG operands into an MCInst: defs first, then register
/// or plain-immediate sources in parse order, then the named modifier
/// immediates in the fixed order dmask, unorm, glc, da, r128, tfe, lwe, slc.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Destination registers come first.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Remember modifier position; emitted below in the fixed order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2657
/// Convert parsed MIMG atomic operands into an MCInst.  Same layout as
/// cvtMIMG, except the first source register is added twice: once after the
/// defs (note the missing I++), and again by the main loop.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Remember modifier position; emitted below in the fixed order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2693
Sam Kolton5f10a132016-05-06 11:31:17 +00002694AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002695 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
Sam Kolton5f10a132016-05-06 11:31:17 +00002696}
2697
2698AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002699 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
Sam Kolton5f10a132016-05-06 11:31:17 +00002700}
2701
2702AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002703 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
Sam Kolton5f10a132016-05-06 11:31:17 +00002704}
2705
2706AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002707 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
Sam Kolton5f10a132016-05-06 11:31:17 +00002708}
2709
2710AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002711 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
Sam Kolton5f10a132016-05-06 11:31:17 +00002712}
2713
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002714AMDGPUOperand::Ptr AMDGPUAsmParser::defaultExpTgt() const {
2715 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyExpTgt);
2716}
2717
Matt Arsenault8a63cb92016-12-05 20:31:49 +00002718AMDGPUOperand::Ptr AMDGPUAsmParser::defaultExpCompr() const {
2719 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyExpCompr);
2720}
2721
2722AMDGPUOperand::Ptr AMDGPUAsmParser::defaultExpVM() const {
2723 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyExpVM);
2724}
2725
Tom Stellard45bb48e2015-06-13 03:28:10 +00002726//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002727// smrd
2728//===----------------------------------------------------------------------===//
2729
Artem Tamazov54bfd542016-10-31 16:07:39 +00002730bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00002731 return isImm() && isUInt<8>(getImm());
2732}
2733
Artem Tamazov54bfd542016-10-31 16:07:39 +00002734bool AMDGPUOperand::isSMRDOffset20() const {
2735 return isImm() && isUInt<20>(getImm());
2736}
2737
Tom Stellard217361c2015-08-06 19:28:38 +00002738bool AMDGPUOperand::isSMRDLiteralOffset() const {
2739 // 32-bit literals are only supported on CI and we only want to use them
2740 // when the offset is > 8-bits.
2741 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2742}
2743
Artem Tamazov54bfd542016-10-31 16:07:39 +00002744AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
2745 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2746}
2747
2748AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002749 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00002750}
2751
2752AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002753 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00002754}
2755
Tom Stellard217361c2015-08-06 19:28:38 +00002756//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002757// vop3
2758//===----------------------------------------------------------------------===//
2759
/// Map an omod multiplier (1, 2 or 4) onto its encoding (0, 1 or 2).
/// Returns false, leaving Mul untouched, for any other value.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
2767
/// Map an omod divisor (1 or 2) onto its encoding (0 or 3).
/// Returns false, leaving Div untouched, for any other value.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2781
/// Map a parsed bound_ctrl value onto its encoding: 0 -> 1, -1 -> 0.
/// Returns false, leaving BoundCtrl untouched, for any other value.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2795
// Note: the order in this table matches the order of operands in AsmString.
// Fields per entry: name prefix, immediate type, IsBit (parsed as a named
// bit with no ':<value>' suffix), and an optional ConvertResult callback
// applied to the parsed value.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA,    true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128,  true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE,   true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002824
Alex Bradbury58eba092016-11-01 16:32:05 +00002825OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Sam Kolton11de3702016-05-24 12:38:33 +00002826 OperandMatchResultTy res;
2827 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2828 // try to parse any optional operand here
2829 if (Op.IsBit) {
2830 res = parseNamedBit(Op.Name, Operands, Op.Type);
2831 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2832 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002833 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
2834 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
2835 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
2836 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00002837 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2838 res = parseSDWADstUnused(Operands);
2839 } else {
2840 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2841 }
2842 if (res != MatchOperand_NoMatch) {
2843 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002844 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002845 }
2846 return MatchOperand_NoMatch;
2847}
2848
Matt Arsenault12c53892016-11-15 19:58:54 +00002849OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002850 StringRef Name = Parser.getTok().getString();
2851 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00002852 return parseIntWithPrefix("mul", Operands,
2853 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002854 }
Matt Arsenault12c53892016-11-15 19:58:54 +00002855
2856 if (Name == "div") {
2857 return parseIntWithPrefix("div", Operands,
2858 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
2859 }
2860
2861 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002862}
2863
Tom Stellarda90b9522016-02-11 03:28:15 +00002864void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2865 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002866 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002867 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002868 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2869 }
2870 for (unsigned E = Operands.size(); I != E; ++I)
2871 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2872}
2873
2874void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002875 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2876 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002877 cvtVOP3(Inst, Operands);
2878 } else {
2879 cvtId(Inst, Operands);
2880 }
2881}
2882
Sam Koltona3ec5c12016-10-07 14:46:06 +00002883static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
2884 // 1. This operand is input modifiers
2885 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
2886 // 2. This is not last operand
2887 && Desc.NumOperands > (OpNum + 1)
2888 // 3. Next operand is register class
2889 && Desc.OpInfo[OpNum + 1].RegClass != -1
2890 // 4. Next register is not tied to any other operand
2891 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
2892}
2893
/// Convert parsed VOP3 operands into an MCInst: defs, then sources (with
/// FP input modifiers where the instruction takes them), then clamp/omod,
/// plus a fixup for the tied src2 of v_mac_{f16,f32}.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Destination registers first.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Emits two MCInst operands: the modifier bits and the value itself.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    // Locate the src2_modifiers slot in the target-independent opcode.
    std::advance(
      it,
      AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
                                     AMDGPU::V_MAC_F16_e64 :
                                     AMDGPU::V_MAC_F32_e64,
                                 AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
2935
Sam Koltondfa29f72016-03-09 12:29:31 +00002936//===----------------------------------------------------------------------===//
2937// dpp
2938//===----------------------------------------------------------------------===//
2939
2940bool AMDGPUOperand::isDPPCtrl() const {
2941 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2942 if (result) {
2943 int64_t Imm = getImm();
2944 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2945 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2946 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2947 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2948 (Imm == 0x130) ||
2949 (Imm == 0x134) ||
2950 (Imm == 0x138) ||
2951 (Imm == 0x13c) ||
2952 (Imm == 0x140) ||
2953 (Imm == 0x141) ||
2954 (Imm == 0x142) ||
2955 (Imm == 0x143);
2956 }
2957 return false;
2958}
2959
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00002960bool AMDGPUOperand::isGPRIdxMode() const {
2961 return isImm() && isUInt<4>(getImm());
2962}
2963
/// Parse a DPP control operand. Accepted source forms:
///   quad_perm:[a,b,c,d]  (each lane select in 0..3)
///   row_shl:n / row_shr:n / row_ror:n   (1 <= n <= 15)
///   wave_shl:1 / wave_rol:1 / wave_shr:1 / wave_ror:1
///   row_mirror / row_half_mirror
///   row_bcast:15 / row_bcast:31
/// On success pushes an ImmTyDppCtrl immediate carrying the hardware
/// encoding (the same value ranges accepted by isDPPCtrl()).
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  // The operand must start with an identifier naming the DPP control kind.
  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    // All remaining forms are "<prefix>:<value>".
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane select goes into the low 2 bits of the encoding.
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // The remaining three lane selects occupy 2 bits each.
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Fold the prefix and the raw value into the final control encoding;
      // out-of-range values for each form are rejected.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
3063
Sam Kolton5f10a132016-05-06 11:31:17 +00003064AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003065 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003066}
3067
Sam Kolton5f10a132016-05-06 11:31:17 +00003068AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003069 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003070}
3071
Sam Kolton5f10a132016-05-06 11:31:17 +00003072AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003073 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00003074}
3075
/// Convert a parsed DPP instruction into its MCInst form: emit defs, then
/// sources (with FP input modifiers) and the dpp_ctrl immediate, then append
/// the optional dpp operands (row_mask, bank_mask, bound_ctrl) using their
/// defaults when they were not written in the source.
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  // Operands[0] is the mnemonic; emit all result registers (defs) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source operand with FP input modifiers: emits modifier + reg/imm.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // Optional operands, with defaults row_mask=0xf, bank_mask=0xf.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00003114
Sam Kolton3025e7f2016-04-26 13:33:56 +00003115//===----------------------------------------------------------------------===//
3116// sdwa
3117//===----------------------------------------------------------------------===//
3118
Alex Bradbury58eba092016-11-01 16:32:05 +00003119OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003120AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
3121 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003122 using namespace llvm::AMDGPU::SDWA;
3123
Sam Kolton3025e7f2016-04-26 13:33:56 +00003124 SMLoc S = Parser.getTok().getLoc();
3125 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00003126 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003127
Sam Kolton05ef1c92016-06-03 10:27:37 +00003128 res = parseStringWithPrefix(Prefix, Value);
3129 if (res != MatchOperand_Success) {
3130 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00003131 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003132
Sam Kolton3025e7f2016-04-26 13:33:56 +00003133 int64_t Int;
3134 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00003135 .Case("BYTE_0", SdwaSel::BYTE_0)
3136 .Case("BYTE_1", SdwaSel::BYTE_1)
3137 .Case("BYTE_2", SdwaSel::BYTE_2)
3138 .Case("BYTE_3", SdwaSel::BYTE_3)
3139 .Case("WORD_0", SdwaSel::WORD_0)
3140 .Case("WORD_1", SdwaSel::WORD_1)
3141 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00003142 .Default(0xffffffff);
3143 Parser.Lex(); // eat last token
3144
3145 if (Int == 0xffffffff) {
3146 return MatchOperand_ParseFail;
3147 }
3148
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003149 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00003150 return MatchOperand_Success;
3151}
3152
Alex Bradbury58eba092016-11-01 16:32:05 +00003153OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00003154AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003155 using namespace llvm::AMDGPU::SDWA;
3156
Sam Kolton3025e7f2016-04-26 13:33:56 +00003157 SMLoc S = Parser.getTok().getLoc();
3158 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00003159 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00003160
3161 res = parseStringWithPrefix("dst_unused", Value);
3162 if (res != MatchOperand_Success) {
3163 return res;
3164 }
3165
3166 int64_t Int;
3167 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00003168 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
3169 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
3170 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00003171 .Default(0xffffffff);
3172 Parser.Lex(); // eat last token
3173
3174 if (Int == 0xffffffff) {
3175 return MatchOperand_ParseFail;
3176 }
3177
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003178 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00003179 return MatchOperand_Success;
3180}
3181
/// Convert a parsed VOP1 SDWA instruction (delegates to cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
3185
/// Convert a parsed VOP2 SDWA instruction (delegates to cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
3189
/// Convert a parsed VOPC SDWA instruction (delegates to cvtSDWA).
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
}
3193
/// Convert a parsed SDWA instruction into its MCInst form.
/// \p BasicInstType selects the encoding class (SIInstrFlags::VOP1, VOP2 or
/// VOPC) and determines which optional sel/unused operands are appended.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  OptionalImmIndexMap OptionalIdx;

  // Operands[0] is the mnemonic; emit all result registers (defs) first.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (BasicInstType == SIInstrFlags::VOPC &&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC sdwa use "vcc" token as dst. Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source operand with input modifiers: emits modifier + reg/imm.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa) {
    // V_NOP_sdwa has no optional sdwa arguments
    // Append the sel/unused operands each class supports, with their defaults
    // (6 and 2 are the default encodings passed through to the operand).
    switch (BasicInstType) {
    case SIInstrFlags::VOP1: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      break;
    }
    case SIInstrFlags::VOP2: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;
    }
    case SIInstrFlags::VOPC: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;
    }
    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }

}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003261
Tom Stellard45bb48e2015-06-13 03:28:10 +00003262/// Force static initialization.
3263extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00003264 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
3265 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00003266}
3267
3268#define GET_REGISTER_MATCHER
3269#define GET_MATCHER_IMPLEMENTATION
3270#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00003271
3272
// This function should be defined after the auto-generated include so that we
// have the MatchClassKind enum defined.
3275unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
3276 unsigned Kind) {
3277 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00003278 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00003279 // operand. This method checks if we are given immediate operand but expect to
3280 // get corresponding token.
3281 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
3282 switch (Kind) {
3283 case MCK_addr64:
3284 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
3285 case MCK_gds:
3286 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
3287 case MCK_glc:
3288 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
3289 case MCK_idxen:
3290 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
3291 case MCK_offen:
3292 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003293 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00003294 // When operands have expression values, they will return true for isToken,
3295 // because it is not possible to distinguish between a token and an
3296 // expression at parse time. MatchInstructionImpl() will always try to
3297 // match an operand as a token, when isToken returns true, and when the
3298 // name of the expression is not a valid token, the match will fail,
3299 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003300 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
3301 case MCK_SSrcF32:
3302 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00003303 case MCK_SoppBrTarget:
3304 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003305 case MCK_VReg32OrOff:
3306 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
3307 default:
3308 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00003309 }
3310}