//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

namespace {

struct OptionalOperand;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  typedef std::unique_ptr<AMDGPUOperand> Ptr;

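  // Source modifiers attached to a register or immediate operand. For
  // example (illustrative, matching the parsing code later in this file):
  // "-v0" sets Neg, "|v1|" or "abs(v1)" sets Abs, and "sext(v2)" sets Sext.
  // getModifiersOperand() folds them into the SISrcMods bits used by the
  // instruction encoding.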
  struct Modifiers {
    bool Abs;
    bool Neg;
    bool Sext;

    bool hasFPModifiers() const { return Abs || Neg; }
    bool hasIntModifiers() const { return Sext; }
    bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }

    int64_t getFPModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Abs ? SISrcMods::ABS : 0;
      Operand |= Neg ? SISrcMods::NEG : 0;
      return Operand;
    }

    int64_t getIntModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Sext ? SISrcMods::SEXT : 0;
      return Operand;
    }

    int64_t getModifiersOperand() const {
      assert(!(hasFPModifiers() && hasIntModifiers())
             && "fp and int modifiers should not be used simultaneously");
      if (hasFPModifiers()) {
        return getFPModifiersOperand();
      } else if (hasIntModifiers()) {
        return getIntModifiersOperand();
      } else {
        return 0;
      }
    }

    friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
  };

  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClampSI,
    ImmTyOModSI,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
    ImmTyHwreg,
    ImmTySendMsg,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    int64_t Val;
    ImmTy Type;
    bool IsFPImm;
    Modifiers Mods;
  };

  struct RegOp {
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    unsigned RegNo;
    bool IsForcedVOP3;
    Modifiers Mods;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  bool isToken() const override {
    if (Kind == Token)
      return true;

    if (Kind != Expression || !Expr)
      return false;

    // When parsing operands, we can't always tell if something was meant to be
    // a token, like 'gds', or an expression that references a global variable.
    // In this case, we assume the string is an expression, and if we need to
    // interpret it as a token, then we treat the symbol name as the token.
    return isa<MCSymbolRefExpr>(Expr);
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    if (!isImmTy(ImmTyNone)) {
      // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
      return false;
    }
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return isRegKind() && !Reg.Mods.hasModifiers();
  }

  bool isRegOrImmWithInputMods() const {
    return isRegKind() || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }

  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
  }

  bool isSCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32() || isExpr();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
  }

  bool isVCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of instruction type (unsigned/signed, floating or "untyped"/B64),
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;

  StringRef getExpressionAsToken() const {
    assert(isExpr());
    const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
    return S->getSymbol().getName();
  }


  StringRef getToken() const {
    assert(isToken());

    if (Kind == Expression)
      return getExpressionAsToken();

    return StringRef(Tok.Data, Tok.Length);
  }

  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
    if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers()) {
      // Apply modifiers to immediate value
      int64_t Val = Imm.Val;
      bool Negate = Imm.Mods.Neg; // Only negate can get here
      if (Imm.IsFPImm) {
        APFloat F(BitsToFloat(Val));
        if (Negate) {
          F.changeSign();
        }
        Val = F.bitcastToAPInt().getZExtValue();
      } else {
        Val = Negate ? -Val : Val;
      }
      Inst.addOperand(MCOperand::createImm(Val));
    } else {
      Inst.addOperand(MCOperand::createImm(getImm()));
    }
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else if (isExpr())
      Inst.addOperand(MCOperand::createExpr(Expr));
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    if (isRegKind()) {
      addRegOperands(Inst, N);
    } else {
      addImmOperands(Inst, N, false);
    }
  }

  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  void printImmTy(raw_ostream& OS, ImmTy Type) const {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128: OS << "R128"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    }
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
                                      enum ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Mods = {false, false, false};
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
                                      SMLoc E,
                                      const MCRegisterInfo *TRI,
                                      const MCSubtargetInfo *STI,
                                      bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Mods = {false, false, false};
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
  OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
  return OS;
}

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;
  bool ForcedDPP;
  bool ForcedSDWA;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0),
        ForcedDPP(false),
        ForcedSDWA(false) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is no suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
      AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
    }
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                                          bool (*ConvertResult)(int64_t&) = 0);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);

  OperandMatchResultTy parseImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};

}

static int getRegClass(RegisterKind Is, unsigned RegWidth) {
  if (Is == IS_VGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  } else if (Is == IS_TTMP) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::TTMP_32RegClassID;
      case 2: return AMDGPU::TTMP_64RegClassID;
      case 4: return AMDGPU::TTMP_128RegClassID;
    }
  } else if (Is == IS_SGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::SGPR_32RegClassID;
      case 2: return AMDGPU::SGPR_64RegClassID;
      case 4: return AMDGPU::SGPR_128RegClassID;
      case 8: return AMDGPU::SReg_256RegClassID;
      case 16: return AMDGPU::SReg_512RegClassID;
    }
  }
  return -1;
}
771
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000772static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000773 return StringSwitch<unsigned>(RegName)
774 .Case("exec", AMDGPU::EXEC)
775 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000776 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000777 .Case("m0", AMDGPU::M0)
778 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000779 .Case("tba", AMDGPU::TBA)
780 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000781 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
782 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000783 .Case("vcc_lo", AMDGPU::VCC_LO)
784 .Case("vcc_hi", AMDGPU::VCC_HI)
785 .Case("exec_lo", AMDGPU::EXEC_LO)
786 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000787 .Case("tma_lo", AMDGPU::TMA_LO)
788 .Case("tma_hi", AMDGPU::TMA_HI)
789 .Case("tba_lo", AMDGPU::TBA_LO)
790 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000791 .Default(0);
792}
793
794bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000795 auto R = parseRegister();
796 if (!R) return true;
797 assert(R->isReg());
798 RegNo = R->getReg();
799 StartLoc = R->getStartLoc();
800 EndLoc = R->getEndLoc();
801 return false;
802}

bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
{
  switch (RegKind) {
  case IS_SPECIAL:
    if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
    if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
    return false;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
    if (Reg1 != Reg + RegWidth) { return false; }
    RegWidth++;
    return true;
  default:
    assert(false); return false;
  }
}

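// Parses one register reference. Accepted forms (illustrative, per the cases
// handled below): special registers such as "vcc", "exec" or "flat_scratch",
// single registers such as "v0", "s5" or "ttmp3", ranges such as "v[8:11]" or
// "s[2:3]", and lists of consecutive 32-bit registers such as "[s0,s1,s2,s3]".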
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}

std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    return nullptr;
  }
  return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
                                  TRI, &getSTI(), false);
}

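// Parses an immediate operand: an optionally negated 32-bit integer or a
// single-precision floating-point literal (for example "-16" or "0.5");
// wider integer literals are rejected with a diagnostic.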
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure of the best way to detect this.
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
  auto res = parseImm(Operands);
  if (res != MatchOperand_NoMatch) {
    return res;
  }

  if (auto R = parseRegister()) {
    assert(R->isReg());
    R->Reg.IsForcedVOP3 = isForcedVOP3();
    Operands.push_back(std::move(R));
    return MatchOperand_Success;
  }
  return MatchOperand_ParseFail;
}

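// Parses a register or immediate together with optional floating-point
// source modifiers, e.g. (illustrative) "-v1" for negation and "|v2|" or
// "abs(v2)" for absolute value; the parsed modifiers are recorded on the
// resulting operand via setModifiers().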
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine whether a minus sign means a
  // negate modifier or a negative immediate value.
  // By default we assume it is a modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Negate) {
    Mods.Neg = true;
  }
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  bool Sext = false;

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Sext) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  // Which asm variants we should check.
  std::vector<unsigned> MatchedVariants;
  if (getForcedEncodingSize() == 32) {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
  } else if (isForcedVOP3()) {
    MatchedVariants = {AMDGPUAsmVariants::VOP3};
  } else if (isForcedSDWA()) {
    MatchedVariants = {AMDGPUAsmVariants::SDWA};
  } else if (isForcedDPP()) {
    MatchedVariants = {AMDGPUAsmVariants::DPP};
  } else {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
                       AMDGPUAsmVariants::VOP3,
                       AMDGPUAsmVariants::SDWA,
                       AMDGPUAsmVariants::DPP};
  }

  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : MatchedVariants) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use the most
    // specific status as the result:
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {

  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}
1360
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001361bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1362 if (getLexer().isNot(AsmToken::Identifier))
1363 return TokError("expected symbol name");
1364
1365 StringRef KernelName = Parser.getTok().getString();
1366
1367 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1368 ELF::STT_AMDGPU_HSA_KERNEL);
1369 Lex();
1370 return false;
1371}
1372
Tom Stellard00f2f912015-12-02 19:47:57 +00001373bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1374 if (getLexer().isNot(AsmToken::Identifier))
1375 return TokError("expected symbol name");
1376
1377 StringRef GlobalName = Parser.getTok().getIdentifier();
1378
1379 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1380 Lex();
1381 return false;
1382}
1383
1384bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1385 if (getLexer().isNot(AsmToken::Identifier))
1386 return TokError("expected symbol name");
1387
1388 StringRef GlobalName = Parser.getTok().getIdentifier();
1389
1390 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1391 Lex();
1392 return false;
1393}
1394
1395bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1396 getParser().getStreamer().SwitchSection(
1397 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1398 return false;
1399}
1400
1401bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1402 getParser().getStreamer().SwitchSection(
1403 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1404 return false;
1405}
1406
Tom Stellard9760f032015-12-03 03:34:32 +00001407bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1408 getParser().getStreamer().SwitchSection(
1409 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1410 return false;
1411}
1412
Tom Stellard45bb48e2015-06-13 03:28:10 +00001413bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001414 StringRef IDVal = DirectiveID.getString();
1415
1416 if (IDVal == ".hsa_code_object_version")
1417 return ParseDirectiveHSACodeObjectVersion();
1418
1419 if (IDVal == ".hsa_code_object_isa")
1420 return ParseDirectiveHSACodeObjectISA();
1421
Tom Stellardff7416b2015-06-26 21:58:31 +00001422 if (IDVal == ".amd_kernel_code_t")
1423 return ParseDirectiveAMDKernelCodeT();
1424
Tom Stellardfcfaea42016-05-05 17:03:33 +00001425 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001426 return ParseSectionDirectiveHSAText();
1427
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001428 if (IDVal == ".amdgpu_hsa_kernel")
1429 return ParseDirectiveAMDGPUHsaKernel();
1430
Tom Stellard00f2f912015-12-02 19:47:57 +00001431 if (IDVal == ".amdgpu_hsa_module_global")
1432 return ParseDirectiveAMDGPUHsaModuleGlobal();
1433
1434 if (IDVal == ".amdgpu_hsa_program_global")
1435 return ParseDirectiveAMDGPUHsaProgramGlobal();
1436
1437 if (IDVal == ".hsadata_global_agent")
1438 return ParseSectionDirectiveHSADataGlobalAgent();
1439
1440 if (IDVal == ".hsadata_global_program")
1441 return ParseSectionDirectiveHSADataGlobalProgram();
1442
Tom Stellard9760f032015-12-03 03:34:32 +00001443 if (IDVal == ".hsarodata_readonly_agent")
1444 return ParseSectionDirectiveHSARodataReadonlyAgent();
1445
Tom Stellard45bb48e2015-06-13 03:28:10 +00001446 return true;
1447}
1448
Matt Arsenault68802d32015-11-05 03:11:27 +00001449bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1450 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001451 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001452 return true;
1453
Matt Arsenault3b159672015-12-01 20:31:08 +00001454 if (isSI()) {
1455 // No flat_scr
1456 switch (RegNo) {
1457 case AMDGPU::FLAT_SCR:
1458 case AMDGPU::FLAT_SCR_LO:
1459 case AMDGPU::FLAT_SCR_HI:
1460 return false;
1461 default:
1462 return true;
1463 }
1464 }
1465
Matt Arsenault68802d32015-11-05 03:11:27 +00001466 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1467 // SI/CI have.
1468 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1469 R.isValid(); ++R) {
1470 if (*R == RegNo)
1471 return false;
1472 }
1473
1474 return true;
1475}
1476
Tom Stellard45bb48e2015-06-13 03:28:10 +00001477AMDGPUAsmParser::OperandMatchResultTy
1478AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
1479
1480 // Try to parse with a custom parser
1481 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1482
1483 // If we successfully parsed the operand or if there was an error parsing,
1484 // we are done.
1485 //
1486 // If we are parsing after we reach EndOfStatement then this means we
1487 // are appending default values to the Operands list. This is only done
1488 // by custom parser, so we shouldn't continue on to the generic parsing.
Sam Kolton1bdcef72016-05-23 09:59:02 +00001489 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
Tom Stellard45bb48e2015-06-13 03:28:10 +00001490 getLexer().is(AsmToken::EndOfStatement))
1491 return ResTy;
1492
Sam Kolton1bdcef72016-05-23 09:59:02 +00001493 ResTy = parseRegOrImm(Operands);
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001494
Sam Kolton1bdcef72016-05-23 09:59:02 +00001495 if (ResTy == MatchOperand_Success)
1496 return ResTy;
1497
1498 if (getLexer().getKind() == AsmToken::Identifier) {
Tom Stellard89049702016-06-15 02:54:14 +00001499 // If this identifier is a symbol, we want to create an expression for it.
1500 // It is a little difficult to distinguish between a symbol name, and
1501 // an instruction flag like 'gds'. In order to do this, we parse
1502 // all tokens as expressions and then treate the symbol name as the token
1503 // string when we want to interpret the operand as a token.
Sam Kolton1bdcef72016-05-23 09:59:02 +00001504 const auto &Tok = Parser.getTok();
Tom Stellard89049702016-06-15 02:54:14 +00001505 SMLoc S = Tok.getLoc();
1506 const MCExpr *Expr = nullptr;
1507 if (!Parser.parseExpression(Expr)) {
1508 Operands.push_back(AMDGPUOperand::CreateExpr(Expr, S));
1509 return MatchOperand_Success;
1510 }
1511
Sam Kolton1bdcef72016-05-23 09:59:02 +00001512 Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001513 Parser.Lex();
Sam Kolton1bdcef72016-05-23 09:59:02 +00001514 return MatchOperand_Success;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001515 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00001516 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001517}
1518
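// Strips a forced-encoding suffix from the mnemonic and records it for the
// matcher, e.g. "v_add_f32_e64" forces the 64-bit (VOP3) encoding and is
// subsequently matched as "v_add_f32".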
Sam Kolton05ef1c92016-06-03 10:27:37 +00001519StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1520 // Clear any forced encodings from the previous instruction.
1521 setForcedEncodingSize(0);
1522 setForcedDPP(false);
1523 setForcedSDWA(false);
1524
1525 if (Name.endswith("_e64")) {
1526 setForcedEncodingSize(64);
1527 return Name.substr(0, Name.size() - 4);
1528 } else if (Name.endswith("_e32")) {
1529 setForcedEncodingSize(32);
1530 return Name.substr(0, Name.size() - 4);
1531 } else if (Name.endswith("_dpp")) {
1532 setForcedDPP(true);
1533 return Name.substr(0, Name.size() - 4);
1534 } else if (Name.endswith("_sdwa")) {
1535 setForcedSDWA(true);
1536 return Name.substr(0, Name.size() - 5);
1537 }
1538 return Name;
1539}
1540
Tom Stellard45bb48e2015-06-13 03:28:10 +00001541bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1542 StringRef Name,
1543 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001544 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00001545 Name = parseMnemonicSuffix(Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001546 Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00001547
Tom Stellard45bb48e2015-06-13 03:28:10 +00001548 while (!getLexer().is(AsmToken::EndOfStatement)) {
1549 AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1550
1551 // Eat the comma or space if there is one.
1552 if (getLexer().is(AsmToken::Comma))
1553 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00001554
Tom Stellard45bb48e2015-06-13 03:28:10 +00001555 switch (Res) {
1556 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001557 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001558 Error(getLexer().getLoc(), "failed parsing operand.");
1559 while (!getLexer().is(AsmToken::EndOfStatement)) {
1560 Parser.Lex();
1561 }
1562 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001563 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001564 Error(getLexer().getLoc(), "not a valid operand.");
1565 while (!getLexer().is(AsmToken::EndOfStatement)) {
1566 Parser.Lex();
1567 }
1568 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001569 }
1570 }
1571
Tom Stellard45bb48e2015-06-13 03:28:10 +00001572 return false;
1573}
1574
1575//===----------------------------------------------------------------------===//
1576// Utility functions
1577//===----------------------------------------------------------------------===//
1578
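// Parses an integer operand written as "<prefix>:<value>", e.g.
// (illustrative) "offset:4095" or "dmask:0xf".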
1579AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001580AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001581 switch(getLexer().getKind()) {
1582 default: return MatchOperand_NoMatch;
1583 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001584 StringRef Name = Parser.getTok().getString();
1585 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001586 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001587 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001588
1589 Parser.Lex();
1590 if (getLexer().isNot(AsmToken::Colon))
1591 return MatchOperand_ParseFail;
1592
1593 Parser.Lex();
1594 if (getLexer().isNot(AsmToken::Integer))
1595 return MatchOperand_ParseFail;
1596
1597 if (getParser().parseAbsoluteExpression(Int))
1598 return MatchOperand_ParseFail;
1599 break;
1600 }
1601 }
1602 return MatchOperand_Success;
1603}
1604
1605AMDGPUAsmParser::OperandMatchResultTy
1606AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001607 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001608 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001609
1610 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001611 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001612
Sam Kolton11de3702016-05-24 12:38:33 +00001613 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001614 if (Res != MatchOperand_Success)
1615 return Res;
1616
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001617 if (ConvertResult && !ConvertResult(Value)) {
1618 return MatchOperand_ParseFail;
1619 }
1620
1621 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001622 return MatchOperand_Success;
1623}
1624
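// Parses a named single-bit operand: the bare name sets the bit and a "no"
// prefix clears it, e.g. (illustrative) "glc" vs. "noglc".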
1625AMDGPUAsmParser::OperandMatchResultTy
1626AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001627 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001628 int64_t Bit = 0;
1629 SMLoc S = Parser.getTok().getLoc();
1630
1631 // We are at the end of the statement, and this is a default argument, so
1632 // use a default value.
1633 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1634 switch(getLexer().getKind()) {
1635 case AsmToken::Identifier: {
1636 StringRef Tok = Parser.getTok().getString();
1637 if (Tok == Name) {
1638 Bit = 1;
1639 Parser.Lex();
1640 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1641 Bit = 0;
1642 Parser.Lex();
1643 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001644 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001645 }
1646 break;
1647 }
1648 default:
1649 return MatchOperand_NoMatch;
1650 }
1651 }
1652
1653 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1654 return MatchOperand_Success;
1655}
1656
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001657typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1658
Sam Koltona74cd522016-03-18 15:35:51 +00001659void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1660 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001661 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001662 auto i = OptionalIdx.find(ImmT);
1663 if (i != OptionalIdx.end()) {
1664 unsigned Idx = i->second;
1665 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1666 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001667 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001668 }
1669}
1670
Matt Arsenault37fefd62016-06-10 02:18:02 +00001671AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00001672AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00001673 if (getLexer().isNot(AsmToken::Identifier)) {
1674 return MatchOperand_NoMatch;
1675 }
1676 StringRef Tok = Parser.getTok().getString();
1677 if (Tok != Prefix) {
1678 return MatchOperand_NoMatch;
1679 }
1680
1681 Parser.Lex();
1682 if (getLexer().isNot(AsmToken::Colon)) {
1683 return MatchOperand_ParseFail;
1684 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001685
Sam Kolton3025e7f2016-04-26 13:33:56 +00001686 Parser.Lex();
1687 if (getLexer().isNot(AsmToken::Identifier)) {
1688 return MatchOperand_ParseFail;
1689 }
1690
1691 Value = Parser.getTok().getString();
1692 return MatchOperand_Success;
1693}
1694
Tom Stellard45bb48e2015-06-13 03:28:10 +00001695//===----------------------------------------------------------------------===//
1696// ds
1697//===----------------------------------------------------------------------===//
1698
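// Converts parsed operands for DS instructions that take split offsets,
// e.g. (illustrative) "ds_write2_b32 v1, v2, v3 offset0:4 offset1:8".
// Unspecified optional operands default to 0 and m0 is appended implicitly.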
Tom Stellard45bb48e2015-06-13 03:28:10 +00001699void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1700 const OperandVector &Operands) {
1701
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001702 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001703
1704 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1705 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1706
1707 // Add the register arguments
1708 if (Op.isReg()) {
1709 Op.addRegOperands(Inst, 1);
1710 continue;
1711 }
1712
1713 // Handle optional arguments
1714 OptionalIdx[Op.getImmTy()] = i;
1715 }
1716
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001717 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1718 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001719 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001720
Tom Stellard45bb48e2015-06-13 03:28:10 +00001721 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1722}
1723
1724void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1725
1726 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1727 bool GDSOnly = false;
1728
1729 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1730 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1731
1732 // Add the register arguments
1733 if (Op.isReg()) {
1734 Op.addRegOperands(Inst, 1);
1735 continue;
1736 }
1737
1738 if (Op.isToken() && Op.getToken() == "gds") {
1739 GDSOnly = true;
1740 continue;
1741 }
1742
1743 // Handle optional arguments
1744 OptionalIdx[Op.getImmTy()] = i;
1745 }
1746
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001747 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001749
1750 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001751 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001752 }
1753 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1754}
1755
1756
1757//===----------------------------------------------------------------------===//
1758// s_waitcnt
1759//===----------------------------------------------------------------------===//
1760
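// Parses one "<name>(<value>)" piece of an s_waitcnt operand and folds it
// into IntVal, e.g. (illustrative) "s_waitcnt vmcnt(0) & lgkmcnt(0)".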
1761bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1762 StringRef CntName = Parser.getTok().getString();
1763 int64_t CntVal;
1764
1765 Parser.Lex();
1766 if (getLexer().isNot(AsmToken::LParen))
1767 return true;
1768
1769 Parser.Lex();
1770 if (getLexer().isNot(AsmToken::Integer))
1771 return true;
1772
1773 if (getParser().parseAbsoluteExpression(CntVal))
1774 return true;
1775
1776 if (getLexer().isNot(AsmToken::RParen))
1777 return true;
1778
1779 Parser.Lex();
1780 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1781 Parser.Lex();
1782
1783 int CntShift;
1784 int CntMask;
1785
1786 if (CntName == "vmcnt") {
1787 CntMask = 0xf;
1788 CntShift = 0;
1789 } else if (CntName == "expcnt") {
1790 CntMask = 0x7;
1791 CntShift = 4;
1792 } else if (CntName == "lgkmcnt") {
Tom Stellard3d2c8522016-01-28 17:13:44 +00001793 CntMask = 0xf;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001794 CntShift = 8;
1795 } else {
1796 return true;
1797 }
1798
1799 IntVal &= ~(CntMask << CntShift);
1800 IntVal |= (CntVal << CntShift);
1801 return false;
1802}
1803
1804AMDGPUAsmParser::OperandMatchResultTy
1805AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1806 // Disable all counters by default.
1807 // vmcnt [3:0]
1808 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00001809 // lgkmcnt [11:8]
1810 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001811 SMLoc S = Parser.getTok().getLoc();
1812
1813 switch(getLexer().getKind()) {
1814 default: return MatchOperand_ParseFail;
1815 case AsmToken::Integer:
1816 // The operand can be an integer value.
1817 if (getParser().parseAbsoluteExpression(CntVal))
1818 return MatchOperand_ParseFail;
1819 break;
1820
1821 case AsmToken::Identifier:
1822 do {
1823 if (parseCnt(CntVal))
1824 return MatchOperand_ParseFail;
1825 } while(getLexer().isNot(AsmToken::EndOfStatement));
1826 break;
1827 }
1828 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1829 return MatchOperand_Success;
1830}
1831
Artem Tamazov6edc1352016-05-26 17:00:33 +00001832bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
1833 using namespace llvm::AMDGPU::Hwreg;
1834
Artem Tamazovd6468662016-04-25 14:13:51 +00001835 if (Parser.getTok().getString() != "hwreg")
1836 return true;
1837 Parser.Lex();
1838
1839 if (getLexer().isNot(AsmToken::LParen))
1840 return true;
1841 Parser.Lex();
1842
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001843 if (getLexer().is(AsmToken::Identifier)) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001844 HwReg.IsSymbolic = true;
1845 HwReg.Id = ID_UNKNOWN_;
1846 const StringRef tok = Parser.getTok().getString();
1847 for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
1848 if (tok == IdSymbolic[i]) {
1849 HwReg.Id = i;
1850 break;
1851 }
1852 }
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001853 Parser.Lex();
1854 } else {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001855 HwReg.IsSymbolic = false;
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001856 if (getLexer().isNot(AsmToken::Integer))
1857 return true;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001858 if (getParser().parseAbsoluteExpression(HwReg.Id))
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001859 return true;
1860 }
Artem Tamazovd6468662016-04-25 14:13:51 +00001861
1862 if (getLexer().is(AsmToken::RParen)) {
1863 Parser.Lex();
1864 return false;
1865 }
1866
1867 // optional params
1868 if (getLexer().isNot(AsmToken::Comma))
1869 return true;
1870 Parser.Lex();
1871
1872 if (getLexer().isNot(AsmToken::Integer))
1873 return true;
1874 if (getParser().parseAbsoluteExpression(Offset))
1875 return true;
1876
1877 if (getLexer().isNot(AsmToken::Comma))
1878 return true;
1879 Parser.Lex();
1880
1881 if (getLexer().isNot(AsmToken::Integer))
1882 return true;
1883 if (getParser().parseAbsoluteExpression(Width))
1884 return true;
1885
1886 if (getLexer().isNot(AsmToken::RParen))
1887 return true;
1888 Parser.Lex();
1889
1890 return false;
1891}
1892
1893AMDGPUAsmParser::OperandMatchResultTy
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001894AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001895 using namespace llvm::AMDGPU::Hwreg;
1896
Artem Tamazovd6468662016-04-25 14:13:51 +00001897 int64_t Imm16Val = 0;
1898 SMLoc S = Parser.getTok().getLoc();
1899
1900 switch(getLexer().getKind()) {
Sam Kolton11de3702016-05-24 12:38:33 +00001901 default: return MatchOperand_NoMatch;
Artem Tamazovd6468662016-04-25 14:13:51 +00001902 case AsmToken::Integer:
1903 // The operand can be an integer value.
1904 if (getParser().parseAbsoluteExpression(Imm16Val))
Artem Tamazov6edc1352016-05-26 17:00:33 +00001905 return MatchOperand_NoMatch;
1906 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovd6468662016-04-25 14:13:51 +00001907 Error(S, "invalid immediate: only 16-bit values are legal");
1908 // Do not return an error code, but create an imm operand anyway and proceed
1909 // to the next operand, if any. That avoids unnecessary error messages.
1910 }
1911 break;
1912
1913 case AsmToken::Identifier: {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001914 OperandInfoTy HwReg(ID_UNKNOWN_);
1915 int64_t Offset = OFFSET_DEFAULT_;
1916 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
1917 if (parseHwregConstruct(HwReg, Offset, Width))
Artem Tamazovd6468662016-04-25 14:13:51 +00001918 return MatchOperand_ParseFail;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001919 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
1920 if (HwReg.IsSymbolic)
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001921 Error(S, "invalid symbolic name of hardware register");
1922 else
1923 Error(S, "invalid code of hardware register: only 6-bit values are legal");
Reid Kleckner7f0ae152016-04-27 16:46:33 +00001924 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00001925 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
Artem Tamazovd6468662016-04-25 14:13:51 +00001926 Error(S, "invalid bit offset: only 5-bit values are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00001927 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
Artem Tamazovd6468662016-04-25 14:13:51 +00001928 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00001929 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
Artem Tamazovd6468662016-04-25 14:13:51 +00001930 }
1931 break;
1932 }
1933 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
1934 return MatchOperand_Success;
1935}
1936
Tom Stellard45bb48e2015-06-13 03:28:10 +00001937bool AMDGPUOperand::isSWaitCnt() const {
1938 return isImm();
1939}
1940
Artem Tamazovd6468662016-04-25 14:13:51 +00001941bool AMDGPUOperand::isHwreg() const {
1942 return isImmTy(ImmTyHwreg);
1943}
1944
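// Parses the sendmsg(...) construct, e.g. (illustrative)
// "s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)". The message and operation may
// be given symbolically or as integers; the optional stream id is an integer.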
Artem Tamazov6edc1352016-05-26 17:00:33 +00001945bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001946 using namespace llvm::AMDGPU::SendMsg;
1947
1948 if (Parser.getTok().getString() != "sendmsg")
1949 return true;
1950 Parser.Lex();
1951
1952 if (getLexer().isNot(AsmToken::LParen))
1953 return true;
1954 Parser.Lex();
1955
1956 if (getLexer().is(AsmToken::Identifier)) {
1957 Msg.IsSymbolic = true;
1958 Msg.Id = ID_UNKNOWN_;
1959 const std::string tok = Parser.getTok().getString();
1960 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
1961 switch(i) {
1962 default: continue; // Omit gaps.
1963 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
1964 }
1965 if (tok == IdSymbolic[i]) {
1966 Msg.Id = i;
1967 break;
1968 }
1969 }
1970 Parser.Lex();
1971 } else {
1972 Msg.IsSymbolic = false;
1973 if (getLexer().isNot(AsmToken::Integer))
1974 return true;
1975 if (getParser().parseAbsoluteExpression(Msg.Id))
1976 return true;
1977 if (getLexer().is(AsmToken::Integer))
1978 if (getParser().parseAbsoluteExpression(Msg.Id))
1979 Msg.Id = ID_UNKNOWN_;
1980 }
1981 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
1982 return false;
1983
1984 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
1985 if (getLexer().isNot(AsmToken::RParen))
1986 return true;
1987 Parser.Lex();
1988 return false;
1989 }
1990
1991 if (getLexer().isNot(AsmToken::Comma))
1992 return true;
1993 Parser.Lex();
1994
1995 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
1996 Operation.Id = ID_UNKNOWN_;
1997 if (getLexer().is(AsmToken::Identifier)) {
1998 Operation.IsSymbolic = true;
1999 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
2000 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
2001 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00002002 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00002003 for (int i = F; i < L; ++i) {
2004 if (Tok == S[i]) {
2005 Operation.Id = i;
2006 break;
2007 }
2008 }
2009 Parser.Lex();
2010 } else {
2011 Operation.IsSymbolic = false;
2012 if (getLexer().isNot(AsmToken::Integer))
2013 return true;
2014 if (getParser().parseAbsoluteExpression(Operation.Id))
2015 return true;
2016 }
2017
2018 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
2019 // Stream id is optional.
2020 if (getLexer().is(AsmToken::RParen)) {
2021 Parser.Lex();
2022 return false;
2023 }
2024
2025 if (getLexer().isNot(AsmToken::Comma))
2026 return true;
2027 Parser.Lex();
2028
2029 if (getLexer().isNot(AsmToken::Integer))
2030 return true;
2031 if (getParser().parseAbsoluteExpression(StreamId))
2032 return true;
2033 }
2034
2035 if (getLexer().isNot(AsmToken::RParen))
2036 return true;
2037 Parser.Lex();
2038 return false;
2039}
2040
2041AMDGPUAsmParser::OperandMatchResultTy
2042AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
2043 using namespace llvm::AMDGPU::SendMsg;
2044
2045 int64_t Imm16Val = 0;
2046 SMLoc S = Parser.getTok().getLoc();
2047
2048 switch(getLexer().getKind()) {
2049 default:
2050 return MatchOperand_NoMatch;
2051 case AsmToken::Integer:
2052 // The operand can be an integer value.
2053 if (getParser().parseAbsoluteExpression(Imm16Val))
2054 return MatchOperand_NoMatch;
Artem Tamazov6edc1352016-05-26 17:00:33 +00002055 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00002056 Error(S, "invalid immediate: only 16-bit values are legal");
2057 // Do not return an error code, but create an imm operand anyway and proceed
2058 // to the next operand, if any. That avoids unnecessary error messages.
2059 }
2060 break;
2061 case AsmToken::Identifier: {
2062 OperandInfoTy Msg(ID_UNKNOWN_);
2063 OperandInfoTy Operation(OP_UNKNOWN_);
Artem Tamazov6edc1352016-05-26 17:00:33 +00002064 int64_t StreamId = STREAM_ID_DEFAULT_;
2065 if (parseSendMsgConstruct(Msg, Operation, StreamId))
2066 return MatchOperand_ParseFail;
Artem Tamazovebe71ce2016-05-06 17:48:48 +00002067 do {
2068 // Validate and encode message ID.
2069 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
2070 || Msg.Id == ID_SYSMSG)) {
2071 if (Msg.IsSymbolic)
2072 Error(S, "invalid/unsupported symbolic name of message");
2073 else
2074 Error(S, "invalid/unsupported code of message");
2075 break;
2076 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00002077 Imm16Val = (Msg.Id << ID_SHIFT_);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00002078 // Validate and encode operation ID.
2079 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
2080 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
2081 if (Operation.IsSymbolic)
2082 Error(S, "invalid symbolic name of GS_OP");
2083 else
2084 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
2085 break;
2086 }
2087 if (Operation.Id == OP_GS_NOP
2088 && Msg.Id != ID_GS_DONE) {
2089 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
2090 break;
2091 }
2092 Imm16Val |= (Operation.Id << OP_SHIFT_);
2093 }
2094 if (Msg.Id == ID_SYSMSG) {
2095 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
2096 if (Operation.IsSymbolic)
2097 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
2098 else
2099 Error(S, "invalid/unsupported code of SYSMSG_OP");
2100 break;
2101 }
2102 Imm16Val |= (Operation.Id << OP_SHIFT_);
2103 }
2104 // Validate and encode stream ID.
2105 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
2106 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
2107 Error(S, "invalid stream id: only 2-bit values are legal");
2108 break;
2109 }
2110 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
2111 }
2112 } while (0);
2113 }
2114 break;
2115 }
2116 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
2117 return MatchOperand_Success;
2118}
2119
2120bool AMDGPUOperand::isSendMsg() const {
2121 return isImmTy(ImmTySendMsg);
2122}
2123
Tom Stellard45bb48e2015-06-13 03:28:10 +00002124//===----------------------------------------------------------------------===//
2125// sopp branch targets
2126//===----------------------------------------------------------------------===//
2127
2128AMDGPUAsmParser::OperandMatchResultTy
2129AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2130 SMLoc S = Parser.getTok().getLoc();
2131
2132 switch (getLexer().getKind()) {
2133 default: return MatchOperand_ParseFail;
2134 case AsmToken::Integer: {
2135 int64_t Imm;
2136 if (getParser().parseAbsoluteExpression(Imm))
2137 return MatchOperand_ParseFail;
2138 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
2139 return MatchOperand_Success;
2140 }
2141
2142 case AsmToken::Identifier:
2143 Operands.push_back(AMDGPUOperand::CreateExpr(
2144 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2145 Parser.getTok().getString()), getContext()), S));
2146 Parser.Lex();
2147 return MatchOperand_Success;
2148 }
2149}
2150
2151//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002152// mubuf
2153//===----------------------------------------------------------------------===//
2154
Sam Kolton5f10a132016-05-06 11:31:17 +00002155AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
2156 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
2157}
2158
2159AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
2160 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
2161}
2162
2163AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
2164 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
2165}
2166
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002167void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
2168 const OperandVector &Operands,
2169 bool IsAtomic, bool IsAtomicReturn) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002170 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002171 assert(IsAtomicReturn ? IsAtomic : true);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002172
2173 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2174 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2175
2176 // Add the register arguments
2177 if (Op.isReg()) {
2178 Op.addRegOperands(Inst, 1);
2179 continue;
2180 }
2181
2182 // Handle the case where soffset is an immediate
2183 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
2184 Op.addImmOperands(Inst, 1);
2185 continue;
2186 }
2187
2188 // Handle tokens like 'offen' which are sometimes hard-coded into the
2189 // asm string. There are no MCInst operands for these.
2190 if (Op.isToken()) {
2191 continue;
2192 }
2193 assert(Op.isImm());
2194
2195 // Handle optional arguments
2196 OptionalIdx[Op.getImmTy()] = i;
2197 }
2198
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002199 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
2200 if (IsAtomicReturn) {
2201 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
2202 Inst.insert(I, *I);
2203 }
2204
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002205 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002206 if (!IsAtomic) { // glc is hard-coded.
2207 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2208 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002209 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2210 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002211}
2212
2213//===----------------------------------------------------------------------===//
2214// mimg
2215//===----------------------------------------------------------------------===//
2216
Sam Kolton1bdcef72016-05-23 09:59:02 +00002217void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2218 unsigned I = 1;
2219 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2220 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2221 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2222 }
2223
2224 OptionalImmIndexMap OptionalIdx;
2225
2226 for (unsigned E = Operands.size(); I != E; ++I) {
2227 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2228
2229 // Add the register arguments
2230 if (Op.isRegOrImm()) {
2231 Op.addRegOrImmOperands(Inst, 1);
2232 continue;
2233 } else if (Op.isImmModifier()) {
2234 OptionalIdx[Op.getImmTy()] = I;
2235 } else {
2236 assert(false);
2237 }
2238 }
2239
2240 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2241 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2242 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2243 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2244 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2245 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2246 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2247 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2248}
2249
2250void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2251 unsigned I = 1;
2252 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2253 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2254 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2255 }
2256
2257 // Add src, same as dst
2258 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2259
2260 OptionalImmIndexMap OptionalIdx;
2261
2262 for (unsigned E = Operands.size(); I != E; ++I) {
2263 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2264
2265 // Add the register arguments
2266 if (Op.isRegOrImm()) {
2267 Op.addRegOrImmOperands(Inst, 1);
2268 continue;
2269 } else if (Op.isImmModifier()) {
2270 OptionalIdx[Op.getImmTy()] = I;
2271 } else {
2272 assert(false);
2273 }
2274 }
2275
2276 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2277 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2278 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2279 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2280 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2281 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2282 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2283 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2284}
2285
Sam Kolton5f10a132016-05-06 11:31:17 +00002286AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
2287 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
2288}
2289
2290AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
2291 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
2292}
2293
2294AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
2295 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
2296}
2297
2298AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
2299 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
2300}
2301
2302AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
2303 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
2304}
2305
Tom Stellard45bb48e2015-06-13 03:28:10 +00002306//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002307// smrd
2308//===----------------------------------------------------------------------===//
2309
2310bool AMDGPUOperand::isSMRDOffset() const {
2311
2312 // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
2313 // information here.
2314 return isImm() && isUInt<8>(getImm());
2315}
2316
2317bool AMDGPUOperand::isSMRDLiteralOffset() const {
2318 // 32-bit literals are only supported on CI and we only want to use them
2319 // when the offset is > 8-bits.
2320 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2321}
2322
Sam Kolton5f10a132016-05-06 11:31:17 +00002323AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
2324 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2325}
2326
2327AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
2328 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2329}
2330
Tom Stellard217361c2015-08-06 19:28:38 +00002331//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002332// vop3
2333//===----------------------------------------------------------------------===//
2334
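// omod is a 2-bit field: 0 = none, 1 = mul:2, 2 = mul:4, 3 = div:2. The two
// Convert* helpers below map the value written in the source (e.g. "mul:4")
// onto that encoding.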
2335static bool ConvertOmodMul(int64_t &Mul) {
2336 if (Mul != 1 && Mul != 2 && Mul != 4)
2337 return false;
2338
2339 Mul >>= 1;
2340 return true;
2341}
2342
2343static bool ConvertOmodDiv(int64_t &Div) {
2344 if (Div == 1) {
2345 Div = 0;
2346 return true;
2347 }
2348
2349 if (Div == 2) {
2350 Div = 3;
2351 return true;
2352 }
2353
2354 return false;
2355}
2356
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002357static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
2358 if (BoundCtrl == 0) {
2359 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002360 return true;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002361 } else if (BoundCtrl == -1) {
2362 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002363 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002364 }
2365 return false;
2366}
2367
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002368// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00002369static const OptionalOperand AMDGPUOptionalOperandTable[] = {
2370 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
2371 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
2372 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
2373 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
2374 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
2375 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
2376 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
2377 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
2378 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
2379 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
2380 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
2381 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
2382 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
2383 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
2384 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
2385 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
2386 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
2387 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
2388 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
2389 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
Sam Kolton05ef1c92016-06-03 10:27:37 +00002390 {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
2391 {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
2392 {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
Sam Kolton11de3702016-05-24 12:38:33 +00002393 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002394};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002395
Sam Kolton11de3702016-05-24 12:38:33 +00002396AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
2397 OperandMatchResultTy res;
2398 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2399 // try to parse any optional operand here
2400 if (Op.IsBit) {
2401 res = parseNamedBit(Op.Name, Operands, Op.Type);
2402 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2403 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002404 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
2405 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
2406 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
2407 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00002408 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2409 res = parseSDWADstUnused(Operands);
2410 } else {
2411 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2412 }
2413 if (res != MatchOperand_NoMatch) {
2414 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002415 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002416 }
2417 return MatchOperand_NoMatch;
2418}
2419
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002420AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2421{
2422 StringRef Name = Parser.getTok().getString();
2423 if (Name == "mul") {
Sam Kolton11de3702016-05-24 12:38:33 +00002424 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002425 } else if (Name == "div") {
Sam Kolton11de3702016-05-24 12:38:33 +00002426 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002427 } else {
2428 return MatchOperand_NoMatch;
2429 }
2430}
2431
Tom Stellarda90b9522016-02-11 03:28:15 +00002432void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2433 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002434 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002435 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002436 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2437 }
2438 for (unsigned E = Operands.size(); I != E; ++I)
2439 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2440}
2441
2442void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002443 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2444 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002445 cvtVOP3(Inst, Operands);
2446 } else {
2447 cvtId(Inst, Operands);
2448 }
2449}
2450
Tom Stellarda90b9522016-02-11 03:28:15 +00002451void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002452 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00002453 unsigned I = 1;
2454 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002455 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002456 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00002457 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002458
Tom Stellarda90b9522016-02-11 03:28:15 +00002459 for (unsigned E = Operands.size(); I != E; ++I) {
2460 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00002461 if (Op.isRegOrImmWithInputMods()) {
Sam Kolton945231a2016-06-10 09:57:59 +00002462 // only fp modifiers allowed in VOP3
2463 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002464 } else if (Op.isImm()) {
2465 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00002466 } else {
2467 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002468 }
Tom Stellarda90b9522016-02-11 03:28:15 +00002469 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002470
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002471 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2472 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002473}
2474
Sam Koltondfa29f72016-03-09 12:29:31 +00002475//===----------------------------------------------------------------------===//
2476// dpp
2477//===----------------------------------------------------------------------===//
2478
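// dpp_ctrl is a 9-bit field. The ranges accepted below correspond to
// quad_perm, row_shl/row_shr/row_ror (1-15), the single-valued wave_*
// controls, row_mirror/row_half_mirror and row_bcast, e.g. (illustrative)
// "v_mov_b32_dpp v0, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf".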
2479bool AMDGPUOperand::isDPPCtrl() const {
2480 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2481 if (result) {
2482 int64_t Imm = getImm();
2483 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2484 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2485 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2486 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2487 (Imm == 0x130) ||
2488 (Imm == 0x134) ||
2489 (Imm == 0x138) ||
2490 (Imm == 0x13c) ||
2491 (Imm == 0x140) ||
2492 (Imm == 0x141) ||
2493 (Imm == 0x142) ||
2494 (Imm == 0x143);
2495 }
2496 return false;
2497}
2498
Sam Koltona74cd522016-03-18 15:35:51 +00002499AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002500AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002501 SMLoc S = Parser.getTok().getLoc();
2502 StringRef Prefix;
2503 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00002504
Sam Koltona74cd522016-03-18 15:35:51 +00002505 if (getLexer().getKind() == AsmToken::Identifier) {
2506 Prefix = Parser.getTok().getString();
2507 } else {
2508 return MatchOperand_NoMatch;
2509 }
2510
2511 if (Prefix == "row_mirror") {
2512 Int = 0x140;
2513 } else if (Prefix == "row_half_mirror") {
2514 Int = 0x141;
2515 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002516 // Check to prevent parseDPPCtrl from eating invalid tokens
2517 if (Prefix != "quad_perm"
2518 && Prefix != "row_shl"
2519 && Prefix != "row_shr"
2520 && Prefix != "row_ror"
2521 && Prefix != "wave_shl"
2522 && Prefix != "wave_rol"
2523 && Prefix != "wave_shr"
2524 && Prefix != "wave_ror"
2525 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00002526 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00002527 }
2528
Sam Koltona74cd522016-03-18 15:35:51 +00002529 Parser.Lex();
2530 if (getLexer().isNot(AsmToken::Colon))
2531 return MatchOperand_ParseFail;
2532
2533 if (Prefix == "quad_perm") {
2534 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00002535 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00002536 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00002537 return MatchOperand_ParseFail;
2538
2539 Parser.Lex();
2540 if (getLexer().isNot(AsmToken::Integer))
2541 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002542 Int = getLexer().getTok().getIntVal();
Sam Koltondfa29f72016-03-09 12:29:31 +00002543
Sam Koltona74cd522016-03-18 15:35:51 +00002544 Parser.Lex();
2545 if (getLexer().isNot(AsmToken::Comma))
Sam Koltondfa29f72016-03-09 12:29:31 +00002546 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002547 Parser.Lex();
2548 if (getLexer().isNot(AsmToken::Integer))
2549 return MatchOperand_ParseFail;
2550 Int += (getLexer().getTok().getIntVal() << 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00002551
Sam Koltona74cd522016-03-18 15:35:51 +00002552 Parser.Lex();
2553 if (getLexer().isNot(AsmToken::Comma))
2554 return MatchOperand_ParseFail;
2555 Parser.Lex();
2556 if (getLexer().isNot(AsmToken::Integer))
2557 return MatchOperand_ParseFail;
2558 Int += (getLexer().getTok().getIntVal() << 4);
2559
2560 Parser.Lex();
2561 if (getLexer().isNot(AsmToken::Comma))
2562 return MatchOperand_ParseFail;
2563 Parser.Lex();
2564 if (getLexer().isNot(AsmToken::Integer))
2565 return MatchOperand_ParseFail;
2566 Int += (getLexer().getTok().getIntVal() << 6);
2567
2568 Parser.Lex();
2569 if (getLexer().isNot(AsmToken::RBrac))
2570 return MatchOperand_ParseFail;
2571
2572 } else {
2573 // sel:%d
2574 Parser.Lex();
2575 if (getLexer().isNot(AsmToken::Integer))
2576 return MatchOperand_ParseFail;
2577 Int = getLexer().getTok().getIntVal();
2578
2579 if (Prefix == "row_shl") {
2580 Int |= 0x100;
2581 } else if (Prefix == "row_shr") {
2582 Int |= 0x110;
2583 } else if (Prefix == "row_ror") {
2584 Int |= 0x120;
2585 } else if (Prefix == "wave_shl") {
2586 Int = 0x130;
2587 } else if (Prefix == "wave_rol") {
2588 Int = 0x134;
2589 } else if (Prefix == "wave_shr") {
2590 Int = 0x138;
2591 } else if (Prefix == "wave_ror") {
2592 Int = 0x13C;
2593 } else if (Prefix == "row_bcast") {
2594 if (Int == 15) {
2595 Int = 0x142;
2596 } else if (Int == 31) {
2597 Int = 0x143;
Sam Kolton7a2a3232016-07-14 14:50:35 +00002598 } else {
2599 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002600 }
2601 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002602 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002603 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002604 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002605 }
Sam Koltona74cd522016-03-18 15:35:51 +00002606 Parser.Lex(); // eat last token
2607
2608 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
Sam Koltondfa29f72016-03-09 12:29:31 +00002609 AMDGPUOperand::ImmTyDppCtrl));
2610 return MatchOperand_Success;
2611}
2612
Sam Kolton5f10a132016-05-06 11:31:17 +00002613AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2614 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002615}
2616
Sam Kolton5f10a132016-05-06 11:31:17 +00002617AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2618 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002619}
2620
Sam Kolton5f10a132016-05-06 11:31:17 +00002621AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2622 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2623}
2624
2625void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002626 OptionalImmIndexMap OptionalIdx;
2627
2628 unsigned I = 1;
2629 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2630 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2631 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2632 }
2633
2634 for (unsigned E = Operands.size(); I != E; ++I) {
2635 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2636 // Add the register arguments
Sam Kolton5f10a132016-05-06 11:31:17 +00002637 if (Op.isRegOrImmWithInputMods()) {
Sam Kolton945231a2016-06-10 09:57:59 +00002638 // Only float modifiers supported in DPP
2639 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00002640 } else if (Op.isDPPCtrl()) {
2641 Op.addImmOperands(Inst, 1);
2642 } else if (Op.isImm()) {
2643 // Handle optional arguments
2644 OptionalIdx[Op.getImmTy()] = I;
2645 } else {
2646 llvm_unreachable("Invalid operand type");
2647 }
2648 }
2649
Sam Koltondfa29f72016-03-09 12:29:31 +00002650 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2651 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2652 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2653}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002654
Sam Kolton3025e7f2016-04-26 13:33:56 +00002655//===----------------------------------------------------------------------===//
2656// sdwa
2657//===----------------------------------------------------------------------===//
2658
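// SDWA selectors pick a sub-dword of a source or the destination, e.g.
// (illustrative) "v_mov_b32_sdwa v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PAD
// src0_sel:WORD_1".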
2659AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002660AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
2661 AMDGPUOperand::ImmTy Type) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00002662 SMLoc S = Parser.getTok().getLoc();
2663 StringRef Value;
2664 AMDGPUAsmParser::OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00002665
Sam Kolton05ef1c92016-06-03 10:27:37 +00002666 res = parseStringWithPrefix(Prefix, Value);
2667 if (res != MatchOperand_Success) {
2668 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00002669 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002670
Sam Kolton3025e7f2016-04-26 13:33:56 +00002671 int64_t Int;
2672 Int = StringSwitch<int64_t>(Value)
2673 .Case("BYTE_0", 0)
2674 .Case("BYTE_1", 1)
2675 .Case("BYTE_2", 2)
2676 .Case("BYTE_3", 3)
2677 .Case("WORD_0", 4)
2678 .Case("WORD_1", 5)
2679 .Case("DWORD", 6)
2680 .Default(0xffffffff);
2681 Parser.Lex(); // eat last token
2682
2683 if (Int == 0xffffffff) {
2684 return MatchOperand_ParseFail;
2685 }
2686
Sam Kolton05ef1c92016-06-03 10:27:37 +00002687 Operands.push_back(AMDGPUOperand::CreateImm(Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00002688 return MatchOperand_Success;
2689}
2690
Matt Arsenault37fefd62016-06-10 02:18:02 +00002691AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00002692AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2693 SMLoc S = Parser.getTok().getLoc();
2694 StringRef Value;
2695 AMDGPUAsmParser::OperandMatchResultTy res;
2696
2697 res = parseStringWithPrefix("dst_unused", Value);
2698 if (res != MatchOperand_Success) {
2699 return res;
2700 }
2701
2702 int64_t Int;
2703 Int = StringSwitch<int64_t>(Value)
2704 .Case("UNUSED_PAD", 0)
2705 .Case("UNUSED_SEXT", 1)
2706 .Case("UNUSED_PRESERVE", 2)
2707 .Default(0xffffffff);
2708 Parser.Lex(); // eat last token
2709
2710 if (Int == 0xffffffff) {
2711 return MatchOperand_ParseFail;
2712 }
2713
2714 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2715 AMDGPUOperand::ImmTySdwaDstUnused));
2716 return MatchOperand_Success;
2717}
2718
Sam Kolton945231a2016-06-10 09:57:59 +00002719void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00002720 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002721}
2722
Sam Kolton945231a2016-06-10 09:57:59 +00002723void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00002724 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
2725}
2726
2727void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
2728 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002729}
2730
2731void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
Sam Kolton5196b882016-07-01 09:59:21 +00002732 uint64_t BasicInstType) {
Sam Kolton05ef1c92016-06-03 10:27:37 +00002733 OptionalImmIndexMap OptionalIdx;
2734
2735 unsigned I = 1;
2736 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2737 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2738 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2739 }
2740
2741 for (unsigned E = Operands.size(); I != E; ++I) {
2742 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2743    // Add the register arguments; remember the positions of optional immediates.
Sam Kolton5196b882016-07-01 09:59:21 +00002744 if (BasicInstType == SIInstrFlags::VOPC &&
2745 Op.isReg() &&
2746 Op.Reg.RegNo == AMDGPU::VCC) {
2747      // VOPC sdwa instructions use the "vcc" token as dst. Skip it.
2748 continue;
2749 } else if (Op.isRegOrImmWithInputMods()) {
Sam Kolton945231a2016-06-10 09:57:59 +00002750 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002751 } else if (Op.isImm()) {
2752 // Handle optional arguments
2753 OptionalIdx[Op.getImmTy()] = I;
2754 } else {
2755 llvm_unreachable("Invalid operand type");
2756 }
2757 }
2758
Sam Kolton945231a2016-06-10 09:57:59 +00002759 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
2760
Sam Kolton05ef1c92016-06-03 10:27:37 +00002761 if (Inst.getOpcode() == AMDGPU::V_NOP_sdwa) {
2762 // V_NOP_sdwa has no optional sdwa arguments
2763 return;
2764 }
Sam Kolton5196b882016-07-01 09:59:21 +00002765 switch (BasicInstType) {
2766 case SIInstrFlags::VOP1: {
Sam Kolton05ef1c92016-06-03 10:27:37 +00002767 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
2768 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
2769 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
Sam Kolton5196b882016-07-01 09:59:21 +00002770 break;
2771 }
2772 case SIInstrFlags::VOP2: {
Sam Kolton05ef1c92016-06-03 10:27:37 +00002773 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
2774 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
2775 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
2776 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
Sam Kolton5196b882016-07-01 09:59:21 +00002777 break;
2778 }
2779 case SIInstrFlags::VOPC: {
2780 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
2781 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
2782 break;
2783 }
2784 default:
2785 llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
Sam Kolton05ef1c92016-06-03 10:27:37 +00002786 }
2787}
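// Hedged example of the defaulting done above (the sdwa assembly syntax is an
// assumption): for a VOP2 form written as
//   v_add_f32 v0, v1, v2 src0_sel:BYTE_0
// the missing operands are filled with the defaults used in the switch, i.e. clamp 0,
// dst_sel and src1_sel 6 (DWORD), and dst_unused 2 (UNUSED_PRESERVE), so the emitted
// MCInst always carries the complete sdwa operand set.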
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002788
Tom Stellard45bb48e2015-06-13 03:28:10 +00002789/// Force static initialization.
2790extern "C" void LLVMInitializeAMDGPUAsmParser() {
2791 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
2792 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
2793}
2794
2795#define GET_REGISTER_MATCHER
2796#define GET_MATCHER_IMPLEMENTATION
2797#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00002798
2799
2800// This function should be defined after the auto-generated include so that we have
2801// the MatchClassKind enum defined.
2802unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
2803 unsigned Kind) {
2804 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00002805  // But MatchInstructionImpl() expects to see a token and fails to validate the
Sam Kolton11de3702016-05-24 12:38:33 +00002806  // operand. This method checks whether we were given an immediate operand but are
2807  // expected to provide the corresponding token.
2808 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
2809 switch (Kind) {
2810 case MCK_addr64:
2811 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
2812 case MCK_gds:
2813 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
2814 case MCK_glc:
2815 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
2816 case MCK_idxen:
2817 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
2818 case MCK_offen:
2819 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Tom Stellard89049702016-06-15 02:54:14 +00002820 case MCK_SSrc32:
2821 // When operands have expression values, they will return true for isToken,
2822 // because it is not possible to distinguish between a token and an
2823 // expression at parse time. MatchInstructionImpl() will always try to
2824    // match an operand as a token when isToken returns true; if the
2825    // name of the expression is not a valid token, the match will fail,
2826    // so we need to handle that case here.
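    // A hypothetical illustration (the symbol name is assumed): in "s_mov_b32 s0, my_sym"
    // the operand my_sym is kept as an expression yet reports isToken() == true, so
    // without this case the matcher would reject a valid 32-bit scalar source operand.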
2827 return Operand.isSSrc32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00002828 case MCK_SoppBrTarget:
2829 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00002830 default: return Match_InvalidOperand;
2831 }
2832}
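// Sketch of the intended effect (the instruction shape is an assumption): when a buffer
// instruction is written with a trailing "glc" modifier, that token is parsed as an
// immediate-style AMDGPUOperand; if the matcher then asks for the MCK_glc token class,
// the case above accepts it through Operand.isGLC() instead of failing the whole match.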