blob: 839a5559f92d07746d6fa5326860941c63b427cf [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000018#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000019#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000021#include "llvm/ADT/StringSwitch.h"
22#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000023#include "llvm/CodeGen/MachineValueType.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/MC/MCContext.h"
25#include "llvm/MC/MCExpr.h"
26#include "llvm/MC/MCInst.h"
27#include "llvm/MC/MCInstrInfo.h"
28#include "llvm/MC/MCParser/MCAsmLexer.h"
29#include "llvm/MC/MCParser/MCAsmParser.h"
30#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000031#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000032#include "llvm/MC/MCRegisterInfo.h"
33#include "llvm/MC/MCStreamer.h"
34#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000036#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000037#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/Support/SourceMgr.h"
39#include "llvm/Support/TargetRegistry.h"
40#include "llvm/Support/raw_ostream.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000041#include "llvm/Support/MathExtras.h"
Artem Tamazovebe71ce2016-05-06 17:48:48 +000042
Tom Stellard45bb48e2015-06-13 03:28:10 +000043using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000044using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000045
46namespace {
47
Sam Kolton1eeb11b2016-09-09 14:44:04 +000048class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000049struct OptionalOperand;
50
// Classification of a parsed register operand: vector GPR, scalar GPR,
// trap-temporary register, or one of the other special registers.
// IS_UNKNOWN is the initial/failure state during parsing.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
52
Sam Kolton1eeb11b2016-09-09 14:44:04 +000053//===----------------------------------------------------------------------===//
54// Operand
55//===----------------------------------------------------------------------===//
56
Tom Stellard45bb48e2015-06-13 03:28:10 +000057class AMDGPUOperand : public MCParsedAsmOperand {
58 enum KindTy {
59 Token,
60 Immediate,
61 Register,
62 Expression
63 } Kind;
64
65 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000066 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
68public:
Sam Kolton1eeb11b2016-09-09 14:44:04 +000069 AMDGPUOperand(enum KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
70 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000071
Sam Kolton5f10a132016-05-06 11:31:17 +000072 typedef std::unique_ptr<AMDGPUOperand> Ptr;
73
Sam Kolton945231a2016-06-10 09:57:59 +000074 struct Modifiers {
75 bool Abs;
76 bool Neg;
77 bool Sext;
78
79 bool hasFPModifiers() const { return Abs || Neg; }
80 bool hasIntModifiers() const { return Sext; }
81 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
82
83 int64_t getFPModifiersOperand() const {
84 int64_t Operand = 0;
85 Operand |= Abs ? SISrcMods::ABS : 0;
86 Operand |= Neg ? SISrcMods::NEG : 0;
87 return Operand;
88 }
89
90 int64_t getIntModifiersOperand() const {
91 int64_t Operand = 0;
92 Operand |= Sext ? SISrcMods::SEXT : 0;
93 return Operand;
94 }
95
96 int64_t getModifiersOperand() const {
97 assert(!(hasFPModifiers() && hasIntModifiers())
98 && "fp and int modifiers should not be used simultaneously");
99 if (hasFPModifiers()) {
100 return getFPModifiersOperand();
101 } else if (hasIntModifiers()) {
102 return getIntModifiersOperand();
103 } else {
104 return 0;
105 }
106 }
107
108 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
109 };
110
Tom Stellard45bb48e2015-06-13 03:28:10 +0000111 enum ImmTy {
112 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000113 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000114 ImmTyOffen,
115 ImmTyIdxen,
116 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000117 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000118 ImmTyOffset0,
119 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000120 ImmTyGLC,
121 ImmTySLC,
122 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000123 ImmTyClampSI,
124 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000125 ImmTyDppCtrl,
126 ImmTyDppRowMask,
127 ImmTyDppBankMask,
128 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000129 ImmTySdwaDstSel,
130 ImmTySdwaSrc0Sel,
131 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000132 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000133 ImmTyDMask,
134 ImmTyUNorm,
135 ImmTyDA,
136 ImmTyR128,
137 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +0000138 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000139 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000140 };
141
142 struct TokOp {
143 const char *Data;
144 unsigned Length;
145 };
146
147 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000148 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000149 ImmTy Type;
150 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000151 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000152 };
153
154 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000155 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000156 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000157 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000158 };
159
160 union {
161 TokOp Tok;
162 ImmOp Imm;
163 RegOp Reg;
164 const MCExpr *Expr;
165 };
166
Tom Stellard45bb48e2015-06-13 03:28:10 +0000167 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000168 if (Kind == Token)
169 return true;
170
171 if (Kind != Expression || !Expr)
172 return false;
173
174 // When parsing operands, we can't always tell if something was meant to be
175 // a token, like 'gds', or an expression that references a global variable.
176 // In this case, we assume the string is an expression, and if we need to
177 // interpret is a token, then we treat the symbol name as the token.
178 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000179 }
180
181 bool isImm() const override {
182 return Kind == Immediate;
183 }
184
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000185 bool isInlinableImm(MVT type) const;
186 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000187
Tom Stellard45bb48e2015-06-13 03:28:10 +0000188 bool isRegKind() const {
189 return Kind == Register;
190 }
191
192 bool isReg() const override {
Sam Kolton945231a2016-06-10 09:57:59 +0000193 return isRegKind() && !Reg.Mods.hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000194 }
195
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000196 bool isRegOrImmWithInputMods(MVT type) const {
197 return isRegKind() || isInlinableImm(type);
198 }
199
200 bool isRegOrImmWithInt32InputMods() const {
201 return isRegOrImmWithInputMods(MVT::i32);
202 }
203
204 bool isRegOrImmWithInt64InputMods() const {
205 return isRegOrImmWithInputMods(MVT::i64);
206 }
207
208 bool isRegOrImmWithFP32InputMods() const {
209 return isRegOrImmWithInputMods(MVT::f32);
210 }
211
212 bool isRegOrImmWithFP64InputMods() const {
213 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000214 }
215
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000216 bool isImmTy(ImmTy ImmT) const {
217 return isImm() && Imm.Type == ImmT;
218 }
Sam Kolton945231a2016-06-10 09:57:59 +0000219
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000220 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000221 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000222 }
Sam Kolton945231a2016-06-10 09:57:59 +0000223
224 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
225 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
226 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000227 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
228 bool isDA() const { return isImmTy(ImmTyDA); }
229 bool isR128() const { return isImmTy(ImmTyUNorm); }
230 bool isLWE() const { return isImmTy(ImmTyLWE); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000231 bool isOffen() const { return isImmTy(ImmTyOffen); }
232 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
233 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
234 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
235 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
236 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000237 bool isGDS() const { return isImmTy(ImmTyGDS); }
238 bool isGLC() const { return isImmTy(ImmTyGLC); }
239 bool isSLC() const { return isImmTy(ImmTySLC); }
240 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000241 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
242 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
243 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
244 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
245 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
246 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
247 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
248
249 bool isMod() const {
250 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000251 }
252
253 bool isRegOrImm() const {
254 return isReg() || isImm();
255 }
256
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000257 bool isRegClass(unsigned RCID) const;
258
259 bool isSCSrcB32() const {
260 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000261 }
262
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000263 bool isSCSrcB64() const {
264 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000265 }
266
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000267 bool isSCSrcF32() const {
268 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000269 }
270
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000271 bool isSCSrcF64() const {
272 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000273 }
274
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000275 bool isSSrcB32() const {
276 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
277 }
278
279 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000280 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
281 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000282 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000283 }
284
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000285 bool isSSrcF32() const {
286 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000287 }
288
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000289 bool isSSrcF64() const {
290 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000291 }
292
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000293 bool isVCSrcB32() const {
294 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000295 }
296
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000297 bool isVCSrcB64() const {
298 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
299 }
300
301 bool isVCSrcF32() const {
302 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
303 }
304
305 bool isVCSrcF64() const {
306 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
307 }
308
309 bool isVSrcB32() const {
310 return isVCSrcF32() || isLiteralImm(MVT::i32);
311 }
312
313 bool isVSrcB64() const {
314 return isVCSrcF64() || isLiteralImm(MVT::i64);
315 }
316
317 bool isVSrcF32() const {
318 return isVCSrcF32() || isLiteralImm(MVT::f32);
319 }
320
321 bool isVSrcF64() const {
322 return isVCSrcF64() || isLiteralImm(MVT::f64);
323 }
324
325 bool isKImmFP32() const {
326 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000327 }
328
329 bool isMem() const override {
330 return false;
331 }
332
333 bool isExpr() const {
334 return Kind == Expression;
335 }
336
337 bool isSoppBrTarget() const {
338 return isExpr() || isImm();
339 }
340
Sam Kolton945231a2016-06-10 09:57:59 +0000341 bool isSWaitCnt() const;
342 bool isHwreg() const;
343 bool isSendMsg() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000344 bool isSMRDOffset() const;
345 bool isSMRDLiteralOffset() const;
346 bool isDPPCtrl() const;
347
Tom Stellard89049702016-06-15 02:54:14 +0000348 StringRef getExpressionAsToken() const {
349 assert(isExpr());
350 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
351 return S->getSymbol().getName();
352 }
353
354
Sam Kolton945231a2016-06-10 09:57:59 +0000355 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000356 assert(isToken());
357
358 if (Kind == Expression)
359 return getExpressionAsToken();
360
Sam Kolton945231a2016-06-10 09:57:59 +0000361 return StringRef(Tok.Data, Tok.Length);
362 }
363
364 int64_t getImm() const {
365 assert(isImm());
366 return Imm.Val;
367 }
368
369 enum ImmTy getImmTy() const {
370 assert(isImm());
371 return Imm.Type;
372 }
373
374 unsigned getReg() const override {
375 return Reg.RegNo;
376 }
377
Tom Stellard45bb48e2015-06-13 03:28:10 +0000378 SMLoc getStartLoc() const override {
379 return StartLoc;
380 }
381
Peter Collingbourne0da86302016-10-10 22:49:37 +0000382 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000383 return EndLoc;
384 }
385
Sam Kolton945231a2016-06-10 09:57:59 +0000386 Modifiers getModifiers() const {
387 assert(isRegKind() || isImmTy(ImmTyNone));
388 return isRegKind() ? Reg.Mods : Imm.Mods;
389 }
390
391 void setModifiers(Modifiers Mods) {
392 assert(isRegKind() || isImmTy(ImmTyNone));
393 if (isRegKind())
394 Reg.Mods = Mods;
395 else
396 Imm.Mods = Mods;
397 }
398
399 bool hasModifiers() const {
400 return getModifiers().hasModifiers();
401 }
402
403 bool hasFPModifiers() const {
404 return getModifiers().hasFPModifiers();
405 }
406
407 bool hasIntModifiers() const {
408 return getModifiers().hasIntModifiers();
409 }
410
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000411 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000412
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000413 void addLiteralImmOperand(MCInst &Inst, int64_t Val) const;
414
415 void addKImmFP32Operands(MCInst &Inst, unsigned N) const;
416
417 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000418
419 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
420 if (isRegKind())
421 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000422 else if (isExpr())
423 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000424 else
425 addImmOperands(Inst, N);
426 }
427
428 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
429 Modifiers Mods = getModifiers();
430 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
431 if (isRegKind()) {
432 addRegOperands(Inst, N);
433 } else {
434 addImmOperands(Inst, N, false);
435 }
436 }
437
438 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
439 assert(!hasIntModifiers());
440 addRegOrImmWithInputModsOperands(Inst, N);
441 }
442
443 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
444 assert(!hasFPModifiers());
445 addRegOrImmWithInputModsOperands(Inst, N);
446 }
447
448 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
449 if (isImm())
450 addImmOperands(Inst, N);
451 else {
452 assert(isExpr());
453 Inst.addOperand(MCOperand::createExpr(Expr));
454 }
455 }
456
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000457 void printImmTy(raw_ostream& OS, ImmTy Type) const {
458 switch (Type) {
459 case ImmTyNone: OS << "None"; break;
460 case ImmTyGDS: OS << "GDS"; break;
461 case ImmTyOffen: OS << "Offen"; break;
462 case ImmTyIdxen: OS << "Idxen"; break;
463 case ImmTyAddr64: OS << "Addr64"; break;
464 case ImmTyOffset: OS << "Offset"; break;
465 case ImmTyOffset0: OS << "Offset0"; break;
466 case ImmTyOffset1: OS << "Offset1"; break;
467 case ImmTyGLC: OS << "GLC"; break;
468 case ImmTySLC: OS << "SLC"; break;
469 case ImmTyTFE: OS << "TFE"; break;
470 case ImmTyClampSI: OS << "ClampSI"; break;
471 case ImmTyOModSI: OS << "OModSI"; break;
472 case ImmTyDppCtrl: OS << "DppCtrl"; break;
473 case ImmTyDppRowMask: OS << "DppRowMask"; break;
474 case ImmTyDppBankMask: OS << "DppBankMask"; break;
475 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000476 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
477 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
478 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000479 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
480 case ImmTyDMask: OS << "DMask"; break;
481 case ImmTyUNorm: OS << "UNorm"; break;
482 case ImmTyDA: OS << "DA"; break;
483 case ImmTyR128: OS << "R128"; break;
484 case ImmTyLWE: OS << "LWE"; break;
485 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000486 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000487 }
488 }
489
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000490 void print(raw_ostream &OS) const override {
491 switch (Kind) {
492 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000493 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000494 break;
495 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000496 OS << '<' << getImm();
497 if (getImmTy() != ImmTyNone) {
498 OS << " type: "; printImmTy(OS, getImmTy());
499 }
Sam Kolton945231a2016-06-10 09:57:59 +0000500 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000501 break;
502 case Token:
503 OS << '\'' << getToken() << '\'';
504 break;
505 case Expression:
506 OS << "<expr " << *Expr << '>';
507 break;
508 }
509 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000510
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000511 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
512 int64_t Val, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000513 enum ImmTy Type = ImmTyNone,
514 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000515 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000516 Op->Imm.Val = Val;
517 Op->Imm.IsFPImm = IsFPImm;
518 Op->Imm.Type = Type;
Sam Kolton945231a2016-06-10 09:57:59 +0000519 Op->Imm.Mods = {false, false, false};
Tom Stellard45bb48e2015-06-13 03:28:10 +0000520 Op->StartLoc = Loc;
521 Op->EndLoc = Loc;
522 return Op;
523 }
524
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000525 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
526 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000527 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000528 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000529 Res->Tok.Data = Str.data();
530 Res->Tok.Length = Str.size();
531 Res->StartLoc = Loc;
532 Res->EndLoc = Loc;
533 return Res;
534 }
535
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000536 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
537 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000538 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000539 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000540 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000541 Op->Reg.RegNo = RegNo;
Sam Kolton945231a2016-06-10 09:57:59 +0000542 Op->Reg.Mods = {false, false, false};
Tom Stellard45bb48e2015-06-13 03:28:10 +0000543 Op->Reg.IsForcedVOP3 = ForceVOP3;
544 Op->StartLoc = S;
545 Op->EndLoc = E;
546 return Op;
547 }
548
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000549 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
550 const class MCExpr *Expr, SMLoc S) {
551 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000552 Op->Expr = Expr;
553 Op->StartLoc = S;
554 Op->EndLoc = S;
555 return Op;
556 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000557};
558
Sam Kolton945231a2016-06-10 09:57:59 +0000559raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
560 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
561 return OS;
562}
563
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000564//===----------------------------------------------------------------------===//
565// AsmParser
566//===----------------------------------------------------------------------===//
567
Tom Stellard45bb48e2015-06-13 03:28:10 +0000568class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000569 const MCInstrInfo &MII;
570 MCAsmParser &Parser;
571
572 unsigned ForcedEncodingSize;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000573 bool ForcedDPP;
574 bool ForcedSDWA;
Matt Arsenault68802d32015-11-05 03:11:27 +0000575
Tom Stellard45bb48e2015-06-13 03:28:10 +0000576 /// @name Auto-generated Match Functions
577 /// {
578
579#define GET_ASSEMBLER_HEADER
580#include "AMDGPUGenAsmMatcher.inc"
581
582 /// }
583
Tom Stellard347ac792015-06-26 21:15:07 +0000584private:
585 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
586 bool ParseDirectiveHSACodeObjectVersion();
587 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000588 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
589 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000590 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000591 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000592 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000593 bool ParseDirectiveAMDGPUHsaModuleGlobal();
594 bool ParseDirectiveAMDGPUHsaProgramGlobal();
595 bool ParseSectionDirectiveHSADataGlobalAgent();
596 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000597 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000598 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
599 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000600 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000601
Tom Stellard45bb48e2015-06-13 03:28:10 +0000602public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000603 enum AMDGPUMatchResultTy {
604 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
605 };
606
Akira Hatanakab11ef082015-11-14 06:35:56 +0000607 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000608 const MCInstrInfo &MII,
609 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000610 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Sam Kolton05ef1c92016-06-03 10:27:37 +0000611 ForcedEncodingSize(0),
612 ForcedDPP(false),
613 ForcedSDWA(false) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000614 MCAsmParserExtension::Initialize(Parser);
615
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000616 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000617 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000618 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000619 }
620
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000621 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Artem Tamazov17091362016-06-14 15:03:59 +0000622
623 {
624 // TODO: make those pre-defined variables read-only.
625 // Currently there is none suitable machinery in the core llvm-mc for this.
626 // MCSymbol::isRedefinable is intended for another purpose, and
627 // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
628 AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
629 MCContext &Ctx = getContext();
630 MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
631 Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
632 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
633 Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
634 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
635 Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
636 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000637 }
638
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000639 bool isSI() const {
640 return AMDGPU::isSI(getSTI());
641 }
642
643 bool isCI() const {
644 return AMDGPU::isCI(getSTI());
645 }
646
647 bool isVI() const {
648 return AMDGPU::isVI(getSTI());
649 }
650
651 bool hasSGPR102_SGPR103() const {
652 return !isVI();
653 }
654
Tom Stellard347ac792015-06-26 21:15:07 +0000655 AMDGPUTargetStreamer &getTargetStreamer() {
656 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
657 return static_cast<AMDGPUTargetStreamer &>(TS);
658 }
Matt Arsenault37fefd62016-06-10 02:18:02 +0000659
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000660 const MCRegisterInfo *getMRI() const {
661 // We need this const_cast because for some reason getContext() is not const
662 // in MCAsmParser.
663 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
664 }
665
666 const MCInstrInfo *getMII() const {
667 return &MII;
668 }
669
Sam Kolton05ef1c92016-06-03 10:27:37 +0000670 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
671 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
672 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
Tom Stellard347ac792015-06-26 21:15:07 +0000673
Sam Kolton05ef1c92016-06-03 10:27:37 +0000674 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
675 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
676 bool isForcedDPP() const { return ForcedDPP; }
677 bool isForcedSDWA() const { return ForcedSDWA; }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000678
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000679 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000680 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
681 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +0000682 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
683 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000684 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
685 OperandVector &Operands, MCStreamer &Out,
686 uint64_t &ErrorInfo,
687 bool MatchingInlineAsm) override;
688 bool ParseDirective(AsmToken DirectiveID) override;
689 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000690 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000691 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
692 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000693 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000694
Sam Kolton11de3702016-05-24 12:38:33 +0000695 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000696 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
697 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000698 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000699 bool (*ConvertResult)(int64_t&) = 0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000700 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +0000701 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000702 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000703
Sam Kolton1bdcef72016-05-23 09:59:02 +0000704 OperandMatchResultTy parseImm(OperandVector &Operands);
705 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000706 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
707 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +0000708
Tom Stellard45bb48e2015-06-13 03:28:10 +0000709 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
710 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000711
712 bool parseCnt(int64_t &IntVal);
713 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000714 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000715
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000716private:
717 struct OperandInfoTy {
718 int64_t Id;
719 bool IsSymbolic;
720 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
721 };
Sam Kolton11de3702016-05-24 12:38:33 +0000722
Artem Tamazov6edc1352016-05-26 17:00:33 +0000723 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
724 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000725public:
Sam Kolton11de3702016-05-24 12:38:33 +0000726 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
727
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000728 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000729 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
730
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000731 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
732 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
733 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000734 AMDGPUOperand::Ptr defaultGLC() const;
735 AMDGPUOperand::Ptr defaultSLC() const;
736 AMDGPUOperand::Ptr defaultTFE() const;
737
Sam Kolton5f10a132016-05-06 11:31:17 +0000738 AMDGPUOperand::Ptr defaultDMask() const;
739 AMDGPUOperand::Ptr defaultUNorm() const;
740 AMDGPUOperand::Ptr defaultDA() const;
741 AMDGPUOperand::Ptr defaultR128() const;
742 AMDGPUOperand::Ptr defaultLWE() const;
743 AMDGPUOperand::Ptr defaultSMRDOffset() const;
744 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +0000745
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000746 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
747
Tom Stellarda90b9522016-02-11 03:28:15 +0000748 void cvtId(MCInst &Inst, const OperandVector &Operands);
749 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000750 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000751
752 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000753 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000754
Sam Kolton11de3702016-05-24 12:38:33 +0000755 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000756 AMDGPUOperand::Ptr defaultRowMask() const;
757 AMDGPUOperand::Ptr defaultBankMask() const;
758 AMDGPUOperand::Ptr defaultBoundCtrl() const;
759 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000760
Sam Kolton05ef1c92016-06-03 10:27:37 +0000761 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
762 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000763 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000764 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
765 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +0000766 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
767 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
768 uint64_t BasicInstType);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000769};
770
// Table entry describing an optional operand that may trail an instruction
// mnemonic (e.g. a named value or a bare flag): its spelling, the immediate
// kind assigned on a match, and an optional post-parse value converter.
struct OptionalOperand {
  const char *Name;                 // Operand spelling as written in assembly.
  AMDGPUOperand::ImmTy Type;        // Immediate kind assigned when matched.
  bool IsBit;                       // True for value-less flag operands.
  bool (*ConvertResult)(int64_t&);  // Optional value fixup; may be null
                                    // (declared defaulted to 0 above).
};
777
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000778}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000779
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000780//===----------------------------------------------------------------------===//
781// Operand
782//===----------------------------------------------------------------------===//
783
784bool AMDGPUOperand::isInlinableImm(MVT type) const {
785 if (!isImmTy(ImmTyNone)) {
786 // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
787 return false;
788 }
789 // TODO: We should avoid using host float here. It would be better to
790 // check the float bit values which is what a few other places do.
791 // We've had bot failures before due to weird NaN support on mips hosts.
792
793 APInt Literal(64, Imm.Val);
794
795 if (Imm.IsFPImm) { // We got fp literal token
796 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
797 return AMDGPU::isInlinableLiteral64(Imm.Val, AsmParser->isVI());
798 } else { // Expected 32-bit operand
799 bool lost;
800 APFloat FPLiteral(APFloat::IEEEdouble, Literal);
801 // Convert literal to single precision
802 APFloat::opStatus status = FPLiteral.convert(APFloat::IEEEsingle,
803 APFloat::rmNearestTiesToEven,
804 &lost);
805 // We allow precision lost but not overflow or underflow
806 if (status != APFloat::opOK &&
807 lost &&
808 ((status & APFloat::opOverflow) != 0 ||
809 (status & APFloat::opUnderflow) != 0)) {
810 return false;
811 }
812 // Check if single precision literal is inlinable
813 return AMDGPU::isInlinableLiteral32(
814 static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
815 AsmParser->isVI());
816 }
817 } else { // We got int literal token
818 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
819 return AMDGPU::isInlinableLiteral64(Imm.Val, AsmParser->isVI());
820 } else { // Expected 32-bit operand
821 return AMDGPU::isInlinableLiteral32(
822 static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
823 AsmParser->isVI());
824 }
825 }
826 return false;
827}
828
829bool AMDGPUOperand::isLiteralImm(MVT type) const {
830 // Check that this imediate can be added as literal
831 if (!isImmTy(ImmTyNone)) {
832 return false;
833 }
834
835 APInt Literal(64, Imm.Val);
836
837 if (Imm.IsFPImm) { // We got fp literal token
838 if (type == MVT::f64) { // Expected 64-bit fp operand
839 // We would set low 64-bits of literal to zeroes but we accept this literals
840 return true;
841 } else if (type == MVT::i64) { // Expected 64-bit int operand
842 // We don't allow fp literals in 64-bit integer instructions. It is
843 // unclear how we should encode them.
844 return false;
845 } else { // Expected 32-bit operand
846 bool lost;
847 APFloat FPLiteral(APFloat::IEEEdouble, Literal);
848 // Convert literal to single precision
849 APFloat::opStatus status = FPLiteral.convert(APFloat::IEEEsingle,
850 APFloat::rmNearestTiesToEven,
851 &lost);
852 // We allow precision lost but not overflow or underflow
853 if (status != APFloat::opOK &&
854 lost &&
855 ((status & APFloat::opOverflow) != 0 ||
856 (status & APFloat::opUnderflow) != 0)) {
857 return false;
858 }
859 return true;
860 }
861 } else { // We got int literal token
862 APInt HiBits = Literal.getHiBits(32);
863 if (HiBits == 0xffffffff &&
864 (*Literal.getLoBits(32).getRawData() & 0x80000000) != 0) {
865 // If high 32 bits aren't zeroes then they all should be ones and 32nd
866 // bit should be set. So that this 64-bit literal is sign-extension of
867 // 32-bit value.
868 return true;
869 } else if (HiBits == 0) {
870 return true;
871 }
872 }
873 return false;
874}
875
876bool AMDGPUOperand::isRegClass(unsigned RCID) const {
877 return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
878}
879
880void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
881 int64_t Val = Imm.Val;
882 if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers() && Imm.Mods.Neg) {
883 // Apply modifiers to immediate value. Only negate can get here
884 if (Imm.IsFPImm) {
885 APFloat F(BitsToDouble(Val));
886 F.changeSign();
887 Val = F.bitcastToAPInt().getZExtValue();
888 } else {
889 Val = -Val;
890 }
891 }
892
893 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()), Inst.getNumOperands())) {
894 addLiteralImmOperand(Inst, Val);
895 } else {
896 Inst.addOperand(MCOperand::createImm(Val));
897 }
898}
899
// Encode Val as the literal for the next (literal-accepting) source operand
// of Inst. 64-bit fp operands keep only their high 32 bits (with a warning
// when that loses information); values that fit an inline constant are added
// as-is; anything else falls back to the low 32 bits of the literal.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  APInt Literal(64, Val);
  auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), InstDesc, OpNum); // expected operand size

  if (Imm.IsFPImm) { // We got fp literal token
    if (OpSize == 8) { // Expected 64-bit operand
      // Check if literal is inlinable
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(), AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
      } else if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
                          "Can't encode literal as exact 64-bit"
                          " floating-point operand. Low 32-bits will be"
                          " set to zero");
        }
        // Only the high half of the double pattern is encoded.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
      } else {
        // We don't allow fp literals in 64-bit integer instructions. It is
        // unclear how we should encode them. This case should be checked earlier
        // in predicate methods (isLiteralImm())
        llvm_unreachable("fp literal in 64-bit integer instruction.");
      }
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()
      Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
    }
  } else { // We got int literal token
    if (OpSize == 8) { // Expected 64-bit operand
      auto LiteralVal = Literal.getZExtValue();
      if (AMDGPU::isInlinableLiteral64(LiteralVal, AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    } else { // Expected 32-bit operand
      auto LiteralVal = static_cast<int32_t>(Literal.getLoBits(32).getZExtValue());
      if (AMDGPU::isInlinableLiteral32(LiteralVal, AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    }
    // Not inlinable: emit the low 32 bits as a literal (64-bit values are
    // truncated here).
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
  }
}
955
956void AMDGPUOperand::addKImmFP32Operands(MCInst &Inst, unsigned N) const {
957 APInt Literal(64, Imm.Val);
958 if (Imm.IsFPImm) { // We got fp literal
959 bool lost;
960 APFloat FPLiteral(APFloat::IEEEdouble, Literal);
961 FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
962 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
963 } else { // We got int literal token
964 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
965 }
966}
967
968void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
969 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
970}
971
972//===----------------------------------------------------------------------===//
973// AsmParser
974//===----------------------------------------------------------------------===//
975
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000976static int getRegClass(RegisterKind Is, unsigned RegWidth) {
977 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000978 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000979 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000980 case 1: return AMDGPU::VGPR_32RegClassID;
981 case 2: return AMDGPU::VReg_64RegClassID;
982 case 3: return AMDGPU::VReg_96RegClassID;
983 case 4: return AMDGPU::VReg_128RegClassID;
984 case 8: return AMDGPU::VReg_256RegClassID;
985 case 16: return AMDGPU::VReg_512RegClassID;
986 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000987 } else if (Is == IS_TTMP) {
988 switch (RegWidth) {
989 default: return -1;
990 case 1: return AMDGPU::TTMP_32RegClassID;
991 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000992 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000993 }
994 } else if (Is == IS_SGPR) {
995 switch (RegWidth) {
996 default: return -1;
997 case 1: return AMDGPU::SGPR_32RegClassID;
998 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000999 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001000 case 8: return AMDGPU::SReg_256RegClassID;
1001 case 16: return AMDGPU::SReg_512RegClassID;
1002 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001003 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001004 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001005}
1006
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001007static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001008 return StringSwitch<unsigned>(RegName)
1009 .Case("exec", AMDGPU::EXEC)
1010 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001011 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001012 .Case("m0", AMDGPU::M0)
1013 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001014 .Case("tba", AMDGPU::TBA)
1015 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001016 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1017 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001018 .Case("vcc_lo", AMDGPU::VCC_LO)
1019 .Case("vcc_hi", AMDGPU::VCC_HI)
1020 .Case("exec_lo", AMDGPU::EXEC_LO)
1021 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001022 .Case("tma_lo", AMDGPU::TMA_LO)
1023 .Case("tma_hi", AMDGPU::TMA_HI)
1024 .Case("tba_lo", AMDGPU::TBA_LO)
1025 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001026 .Default(0);
1027}
1028
1029bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001030 auto R = parseRegister();
1031 if (!R) return true;
1032 assert(R->isReg());
1033 RegNo = R->getReg();
1034 StartLoc = R->getStartLoc();
1035 EndLoc = R->getEndLoc();
1036 return false;
1037}
1038
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001039bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
1040{
1041 switch (RegKind) {
1042 case IS_SPECIAL:
1043 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
1044 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
1045 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
1046 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
1047 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
1048 return false;
1049 case IS_VGPR:
1050 case IS_SGPR:
1051 case IS_TTMP:
1052 if (Reg1 != Reg + RegWidth) { return false; }
1053 RegWidth++;
1054 return true;
1055 default:
1056 assert(false); return false;
1057 }
1058}
1059
// Parse one register reference into (RegKind, Reg, RegNum, RegWidth).
// Accepted forms:
//   * special names resolved by getSpecialRegForName()   -> IS_SPECIAL
//   * single registers: "vN", "sN", "ttmpN"
//   * ranges: "v[LO:HI]", where ":HI" is optional
//   * brace lists of consecutive single registers, e.g. [s0,s1,s2,s3]
// Returns true on success; on failure returns false with the lexer left
// wherever parsing stopped.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: 'v', 's', or "ttmp"; RegNumIndex is where the
      // numeric part (if any) starts.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow the low bound.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    // The first element (parsed recursively) fixes the kind; every element
    // must be a single 32-bit register.
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        // Grows (Reg, RegWidth) when Reg1 is the next consecutive register.
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Translate (RegKind, RegNum, RegWidth) into a concrete register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    // Convert the dword index into an index within the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  // Reject registers that exist in the class but not on this subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1185
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001186std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001187 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001188 SMLoc StartLoc = Tok.getLoc();
1189 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001190 RegisterKind RegKind;
1191 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001192
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001193 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
1194 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001195 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001196 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001197}
1198
Sam Kolton1bdcef72016-05-23 09:59:02 +00001199AMDGPUAsmParser::OperandMatchResultTy
1200AMDGPUAsmParser::parseImm(OperandVector &Operands) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001201 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001202 bool Minus = false;
1203 if (getLexer().getKind() == AsmToken::Minus) {
1204 Minus = true;
1205 Parser.Lex();
1206 }
1207
1208 SMLoc S = Parser.getTok().getLoc();
1209 switch(getLexer().getKind()) {
1210 case AsmToken::Integer: {
1211 int64_t IntVal;
1212 if (getParser().parseAbsoluteExpression(IntVal))
1213 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001214 if (Minus)
1215 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001216 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001217 return MatchOperand_Success;
1218 }
1219 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001220 int64_t IntVal;
1221 if (getParser().parseAbsoluteExpression(IntVal))
1222 return MatchOperand_ParseFail;
1223
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001224 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001225 if (Minus)
1226 F.changeSign();
1227 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001228 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001229 AMDGPUOperand::ImmTyNone, true));
1230 return MatchOperand_Success;
1231 }
1232 default:
1233 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1234 }
1235}
1236
1237AMDGPUAsmParser::OperandMatchResultTy
1238AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1239 auto res = parseImm(Operands);
1240 if (res != MatchOperand_NoMatch) {
1241 return res;
1242 }
1243
1244 if (auto R = parseRegister()) {
1245 assert(R->isReg());
1246 R->Reg.IsForcedVOP3 = isForcedVOP3();
1247 Operands.push_back(std::move(R));
1248 return MatchOperand_Success;
1249 }
1250 return MatchOperand_ParseFail;
1251}
1252
// Parse a register/immediate operand with optional floating-point input
// modifiers: a leading '-' (negate), "abs(...)" or "|...|" (absolute value).
// The closing token of a modifier is checked only after the inner operand
// has been parsed.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(" form of the absolute-value modifier.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // "|" form of the absolute-value modifier; cannot be nested inside "abs(".
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Negate) {
    Mods.Neg = true;
  }
  if (Abs) {
    // Expect the matching closing '|'.
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    // Expect the matching ')' of "abs(".
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  // Attach the modifiers to the operand just pushed by parseRegOrImm().
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1316
Sam Kolton945231a2016-06-10 09:57:59 +00001317AMDGPUAsmParser::OperandMatchResultTy
1318AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
1319 bool Sext = false;
1320
1321 if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
1322 Parser.Lex();
1323 Sext = true;
1324 if (getLexer().isNot(AsmToken::LParen)) {
1325 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1326 return MatchOperand_ParseFail;
1327 }
1328 Parser.Lex();
1329 }
1330
1331 auto Res = parseRegOrImm(Operands);
1332 if (Res != MatchOperand_Success) {
1333 return Res;
1334 }
1335
Sam Kolton945231a2016-06-10 09:57:59 +00001336 AMDGPUOperand::Modifiers Mods = {false, false, false};
1337 if (Sext) {
1338 if (getLexer().isNot(AsmToken::RParen)) {
1339 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1340 return MatchOperand_ParseFail;
1341 }
1342 Parser.Lex();
1343 Mods.Sext = true;
1344 }
1345
1346 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00001347 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00001348 Op.setModifiers(Mods);
1349 }
1350 return MatchOperand_Success;
1351}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001352
Tom Stellard45bb48e2015-06-13 03:28:10 +00001353unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
1354
1355 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1356
1357 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00001358 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
1359 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
1360 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00001361 return Match_InvalidOperand;
1362
Tom Stellard88e0b252015-10-06 15:57:53 +00001363 if ((TSFlags & SIInstrFlags::VOP3) &&
1364 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
1365 getForcedEncodingSize() != 64)
1366 return Match_PreferE32;
1367
Sam Koltona3ec5c12016-10-07 14:46:06 +00001368 if (Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa ||
1369 Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa) {
1370 // v_mac_f32/16 allow only dst_sel == DWORD;
1371 auto OpNum = AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
1372 const auto &Op = Inst.getOperand(OpNum);
1373 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
1374 return Match_InvalidOperand;
1375 }
1376 }
1377
Tom Stellard45bb48e2015-06-13 03:28:10 +00001378 return Match_Success;
1379}
1380
// Match the parsed operands against every applicable asm variant (default,
// VOP3, SDWA, DPP), emit the instruction on success, or report the most
// specific failure seen across all variants.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  // What asm variants we should check
  std::vector<unsigned> MatchedVariants;
  if (getForcedEncodingSize() == 32) {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
  } else if (isForcedVOP3()) {
    MatchedVariants = {AMDGPUAsmVariants::VOP3};
  } else if (isForcedSDWA()) {
    MatchedVariants = {AMDGPUAsmVariants::SDWA};
  } else if (isForcedDPP()) {
    MatchedVariants = {AMDGPUAsmVariants::DPP};
  } else {
    // No forced encoding: try every variant, stopping at the first success.
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
                       AMDGPUAsmVariants::VOP3,
                       AMDGPUAsmVariants::SDWA,
                       AMDGPUAsmVariants::DPP};
  }

  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : MatchedVariants) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    // (Result starts at Match_Success so the first iteration always records
    // its status and error info.)
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one (~0ULL means it did not).
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1459
// Parse the "<major>, <minor>" integer pair shared by the HSA code-object
// directives. Returns true (after emitting a diagnostic) on malformed input.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}
1480
1481bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1482
1483 uint32_t Major;
1484 uint32_t Minor;
1485
1486 if (ParseDirectiveMajorMinor(Major, Minor))
1487 return true;
1488
1489 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1490 return false;
1491}
1492
/// Handle the .hsa_code_object_isa directive.
///
/// With no arguments, emits the ISA version of the subtarget being assembled
/// for. Otherwise expects the explicit five-argument form:
///   major, minor, stepping, "vendor", "arch"
/// and forwards the values to the target streamer. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  // Explicit form: major and minor are shared with other directives.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  // Vendor and arch are quoted strings; getStringContents() strips the quotes.
  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1549
Tom Stellardff7416b2015-06-26 21:58:31 +00001550bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1551 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001552 SmallString<40> ErrStr;
1553 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00001554 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001555 return TokError(Err.str());
1556 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001557 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001558 return false;
1559}
1560
1561bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
1562
1563 amd_kernel_code_t Header;
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001564 AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +00001565
1566 while (true) {
1567
Tom Stellardff7416b2015-06-26 21:58:31 +00001568 // Lex EndOfStatement. This is in a while loop, because lexing a comment
1569 // will set the current token to EndOfStatement.
1570 while(getLexer().is(AsmToken::EndOfStatement))
1571 Lex();
1572
1573 if (getLexer().isNot(AsmToken::Identifier))
1574 return TokError("expected value identifier or .end_amd_kernel_code_t");
1575
1576 StringRef ID = getLexer().getTok().getIdentifier();
1577 Lex();
1578
1579 if (ID == ".end_amd_kernel_code_t")
1580 break;
1581
1582 if (ParseAMDKernelCodeTValue(ID, Header))
1583 return true;
1584 }
1585
1586 getTargetStreamer().EmitAMDKernelCodeT(Header);
1587
1588 return false;
1589}
1590
Tom Stellarde135ffd2015-09-25 21:41:28 +00001591bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1592 getParser().getStreamer().SwitchSection(
1593 AMDGPU::getHSATextSection(getContext()));
1594 return false;
1595}
1596
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001597bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1598 if (getLexer().isNot(AsmToken::Identifier))
1599 return TokError("expected symbol name");
1600
1601 StringRef KernelName = Parser.getTok().getString();
1602
1603 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1604 ELF::STT_AMDGPU_HSA_KERNEL);
1605 Lex();
1606 return false;
1607}
1608
Tom Stellard00f2f912015-12-02 19:47:57 +00001609bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1610 if (getLexer().isNot(AsmToken::Identifier))
1611 return TokError("expected symbol name");
1612
1613 StringRef GlobalName = Parser.getTok().getIdentifier();
1614
1615 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1616 Lex();
1617 return false;
1618}
1619
1620bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1621 if (getLexer().isNot(AsmToken::Identifier))
1622 return TokError("expected symbol name");
1623
1624 StringRef GlobalName = Parser.getTok().getIdentifier();
1625
1626 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1627 Lex();
1628 return false;
1629}
1630
1631bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1632 getParser().getStreamer().SwitchSection(
1633 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1634 return false;
1635}
1636
1637bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1638 getParser().getStreamer().SwitchSection(
1639 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1640 return false;
1641}
1642
Tom Stellard9760f032015-12-03 03:34:32 +00001643bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1644 getParser().getStreamer().SwitchSection(
1645 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1646 return false;
1647}
1648
Tom Stellard45bb48e2015-06-13 03:28:10 +00001649bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001650 StringRef IDVal = DirectiveID.getString();
1651
1652 if (IDVal == ".hsa_code_object_version")
1653 return ParseDirectiveHSACodeObjectVersion();
1654
1655 if (IDVal == ".hsa_code_object_isa")
1656 return ParseDirectiveHSACodeObjectISA();
1657
Tom Stellardff7416b2015-06-26 21:58:31 +00001658 if (IDVal == ".amd_kernel_code_t")
1659 return ParseDirectiveAMDKernelCodeT();
1660
Tom Stellardfcfaea42016-05-05 17:03:33 +00001661 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001662 return ParseSectionDirectiveHSAText();
1663
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001664 if (IDVal == ".amdgpu_hsa_kernel")
1665 return ParseDirectiveAMDGPUHsaKernel();
1666
Tom Stellard00f2f912015-12-02 19:47:57 +00001667 if (IDVal == ".amdgpu_hsa_module_global")
1668 return ParseDirectiveAMDGPUHsaModuleGlobal();
1669
1670 if (IDVal == ".amdgpu_hsa_program_global")
1671 return ParseDirectiveAMDGPUHsaProgramGlobal();
1672
1673 if (IDVal == ".hsadata_global_agent")
1674 return ParseSectionDirectiveHSADataGlobalAgent();
1675
1676 if (IDVal == ".hsadata_global_program")
1677 return ParseSectionDirectiveHSADataGlobalProgram();
1678
Tom Stellard9760f032015-12-03 03:34:32 +00001679 if (IDVal == ".hsarodata_readonly_agent")
1680 return ParseSectionDirectiveHSARodataReadonlyAgent();
1681
Tom Stellard45bb48e2015-06-13 03:28:10 +00001682 return true;
1683}
1684
Matt Arsenault68802d32015-11-05 03:11:27 +00001685bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1686 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001687 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001688 return true;
1689
Matt Arsenault3b159672015-12-01 20:31:08 +00001690 if (isSI()) {
1691 // No flat_scr
1692 switch (RegNo) {
1693 case AMDGPU::FLAT_SCR:
1694 case AMDGPU::FLAT_SCR_LO:
1695 case AMDGPU::FLAT_SCR_HI:
1696 return false;
1697 default:
1698 return true;
1699 }
1700 }
1701
Matt Arsenault68802d32015-11-05 03:11:27 +00001702 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1703 // SI/CI have.
1704 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1705 R.isValid(); ++R) {
1706 if (*R == RegNo)
1707 return false;
1708 }
1709
1710 return true;
1711}
1712
/// Parse a single operand of instruction \p Mnemonic into \p Operands.
///
/// Tries, in order: the tablegen'd custom operand parsers, a register or
/// immediate, and finally a general expression / bare identifier token.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Next, try a register or an immediate.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treate the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: fall back to treating the identifier as a
    // plain token (e.g. an instruction modifier such as 'gds').
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1754
Sam Kolton05ef1c92016-06-03 10:27:37 +00001755StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1756 // Clear any forced encodings from the previous instruction.
1757 setForcedEncodingSize(0);
1758 setForcedDPP(false);
1759 setForcedSDWA(false);
1760
1761 if (Name.endswith("_e64")) {
1762 setForcedEncodingSize(64);
1763 return Name.substr(0, Name.size() - 4);
1764 } else if (Name.endswith("_e32")) {
1765 setForcedEncodingSize(32);
1766 return Name.substr(0, Name.size() - 4);
1767 } else if (Name.endswith("_dpp")) {
1768 setForcedDPP(true);
1769 return Name.substr(0, Name.size() - 4);
1770 } else if (Name.endswith("_sdwa")) {
1771 setForcedSDWA(true);
1772 return Name.substr(0, Name.size() - 5);
1773 }
1774 return Name;
1775}
1776
Tom Stellard45bb48e2015-06-13 03:28:10 +00001777bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1778 StringRef Name,
1779 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001780 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00001781 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001782 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00001783
Tom Stellard45bb48e2015-06-13 03:28:10 +00001784 while (!getLexer().is(AsmToken::EndOfStatement)) {
1785 AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1786
1787 // Eat the comma or space if there is one.
1788 if (getLexer().is(AsmToken::Comma))
1789 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00001790
Tom Stellard45bb48e2015-06-13 03:28:10 +00001791 switch (Res) {
1792 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001793 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001794 Error(getLexer().getLoc(), "failed parsing operand.");
1795 while (!getLexer().is(AsmToken::EndOfStatement)) {
1796 Parser.Lex();
1797 }
1798 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001799 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001800 Error(getLexer().getLoc(), "not a valid operand.");
1801 while (!getLexer().is(AsmToken::EndOfStatement)) {
1802 Parser.Lex();
1803 }
1804 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001805 }
1806 }
1807
Tom Stellard45bb48e2015-06-13 03:28:10 +00001808 return false;
1809}
1810
1811//===----------------------------------------------------------------------===//
1812// Utility functions
1813//===----------------------------------------------------------------------===//
1814
1815AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001816AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001817 switch(getLexer().getKind()) {
1818 default: return MatchOperand_NoMatch;
1819 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001820 StringRef Name = Parser.getTok().getString();
1821 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001822 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001823 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001824
1825 Parser.Lex();
1826 if (getLexer().isNot(AsmToken::Colon))
1827 return MatchOperand_ParseFail;
1828
1829 Parser.Lex();
1830 if (getLexer().isNot(AsmToken::Integer))
1831 return MatchOperand_ParseFail;
1832
1833 if (getParser().parseAbsoluteExpression(Int))
1834 return MatchOperand_ParseFail;
1835 break;
1836 }
1837 }
1838 return MatchOperand_Success;
1839}
1840
1841AMDGPUAsmParser::OperandMatchResultTy
1842AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001843 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001844 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001845
1846 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001847 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001848
Sam Kolton11de3702016-05-24 12:38:33 +00001849 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001850 if (Res != MatchOperand_Success)
1851 return Res;
1852
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001853 if (ConvertResult && !ConvertResult(Value)) {
1854 return MatchOperand_ParseFail;
1855 }
1856
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001857 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001858 return MatchOperand_Success;
1859}
1860
1861AMDGPUAsmParser::OperandMatchResultTy
1862AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001863 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001864 int64_t Bit = 0;
1865 SMLoc S = Parser.getTok().getLoc();
1866
1867 // We are at the end of the statement, and this is a default argument, so
1868 // use a default value.
1869 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1870 switch(getLexer().getKind()) {
1871 case AsmToken::Identifier: {
1872 StringRef Tok = Parser.getTok().getString();
1873 if (Tok == Name) {
1874 Bit = 1;
1875 Parser.Lex();
1876 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1877 Bit = 0;
1878 Parser.Lex();
1879 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001880 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001881 }
1882 break;
1883 }
1884 default:
1885 return MatchOperand_NoMatch;
1886 }
1887 }
1888
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001889 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001890 return MatchOperand_Success;
1891}
1892
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001893typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1894
Sam Koltona74cd522016-03-18 15:35:51 +00001895void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1896 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001897 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001898 auto i = OptionalIdx.find(ImmT);
1899 if (i != OptionalIdx.end()) {
1900 unsigned Idx = i->second;
1901 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1902 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001903 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001904 }
1905}
1906
Matt Arsenault37fefd62016-06-10 02:18:02 +00001907AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00001908AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00001909 if (getLexer().isNot(AsmToken::Identifier)) {
1910 return MatchOperand_NoMatch;
1911 }
1912 StringRef Tok = Parser.getTok().getString();
1913 if (Tok != Prefix) {
1914 return MatchOperand_NoMatch;
1915 }
1916
1917 Parser.Lex();
1918 if (getLexer().isNot(AsmToken::Colon)) {
1919 return MatchOperand_ParseFail;
1920 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001921
Sam Kolton3025e7f2016-04-26 13:33:56 +00001922 Parser.Lex();
1923 if (getLexer().isNot(AsmToken::Identifier)) {
1924 return MatchOperand_ParseFail;
1925 }
1926
1927 Value = Parser.getTok().getString();
1928 return MatchOperand_Success;
1929}
1930
Tom Stellard45bb48e2015-06-13 03:28:10 +00001931//===----------------------------------------------------------------------===//
1932// ds
1933//===----------------------------------------------------------------------===//
1934
Tom Stellard45bb48e2015-06-13 03:28:10 +00001935void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1936 const OperandVector &Operands) {
1937
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001938 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001939
1940 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1941 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1942
1943 // Add the register arguments
1944 if (Op.isReg()) {
1945 Op.addRegOperands(Inst, 1);
1946 continue;
1947 }
1948
1949 // Handle optional arguments
1950 OptionalIdx[Op.getImmTy()] = i;
1951 }
1952
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001953 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1954 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001955 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001956
Tom Stellard45bb48e2015-06-13 03:28:10 +00001957 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1958}
1959
1960void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1961
1962 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1963 bool GDSOnly = false;
1964
1965 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1966 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1967
1968 // Add the register arguments
1969 if (Op.isReg()) {
1970 Op.addRegOperands(Inst, 1);
1971 continue;
1972 }
1973
1974 if (Op.isToken() && Op.getToken() == "gds") {
1975 GDSOnly = true;
1976 continue;
1977 }
1978
1979 // Handle optional arguments
1980 OptionalIdx[Op.getImmTy()] = i;
1981 }
1982
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001983 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1984 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001985
1986 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001987 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001988 }
1989 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1990}
1991
1992
1993//===----------------------------------------------------------------------===//
1994// s_waitcnt
1995//===----------------------------------------------------------------------===//
1996
/// Parse one "<name>(<value>)" term of an s_waitcnt operand, where <name> is
/// vmcnt, expcnt or lgkmcnt, and fold the value into the packed encoding in
/// \p IntVal. A trailing '&' or ',' separator is consumed. Returns true on
/// any syntax error or unknown counter name.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Consume an optional '&' or ',' separating this term from the next.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  // The mask and shift of each counter field depend on the ISA version.
  IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
  if (CntName == "vmcnt") {
    CntMask = getVmcntMask(IV);
    CntShift = getVmcntShift(IV);
  } else if (CntName == "expcnt") {
    CntMask = getExpcntMask(IV);
    CntShift = getExpcntShift(IV);
  } else if (CntName == "lgkmcnt") {
    CntMask = getLgkmcntMask(IV);
    CntShift = getLgkmcntShift(IV);
  } else {
    return true;
  }

  // Clear the counter's field, then insert the parsed value.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
2040
/// Parse the operand of s_waitcnt: either a raw integer or a sequence of
/// counter terms like "vmcnt(0) expcnt(1)". Counters that are not mentioned
/// keep their "disabled" (maximum) field value.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // Otherwise parse "name(value)" terms until end of statement.
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, CntVal, S));
  return MatchOperand_Success;
}
2068
/// Parse the "hwreg(<id> [, <offset>, <width>])" construct used by
/// s_getreg/s_setreg. The register may be given by symbolic name or number;
/// \p Offset and \p Width keep their caller-supplied defaults when the
/// optional part is omitted. Returns true on syntax error; range checking is
/// done by the caller (parseHwreg).
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: look it up in IdSymbolic. An unknown name
    // leaves HwReg.Id at ID_UNKNOWN_ for the caller to diagnose.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // A closing paren here means offset/width were omitted.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2129
/// Parse an s_getreg/s_setreg operand: either a bare 16-bit immediate or a
/// hwreg(...) construct, whose fields are range-checked and packed into the
/// immediate encoding (id | offset | width-1).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;

  case AsmToken::Identifier: {
      OperandInfoTy HwReg(ID_UNKNOWN_);
      int64_t Offset = OFFSET_DEFAULT_;
      int64_t Width = WIDTH_M1_DEFAULT_ + 1;
      if (parseHwregConstruct(HwReg, Offset, Width))
        return MatchOperand_ParseFail;
      // Range-check each field. Diagnostics are emitted but parsing continues
      // so an operand is still produced (see the comment above about avoiding
      // cascaded error messages).
      if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
        if (HwReg.IsSymbolic)
          Error(S, "invalid symbolic name of hardware register");
        else
          Error(S, "invalid code of hardware register: only 6-bit values are legal");
      }
      if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
        Error(S, "invalid bit offset: only 5-bit values are legal");
      if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
        Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
      // Pack id, bit offset and (width - 1) into the 16-bit immediate.
      Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
2173
// An s_waitcnt operand is any immediate (the packed counter encoding).
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
2177
// A hwreg operand is an immediate tagged ImmTyHwreg by parseHwreg().
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
2181
/// Parse the "sendmsg(<msg> [, <op> [, <stream>]])" construct of s_sendmsg.
/// Each piece may be a symbolic name or a number; values are returned raw in
/// \p Msg / \p Operation / \p StreamId and validated by the caller
/// (parseSendMsgOp). Returns true on syntax error.
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name; an unknown name leaves Msg.Id at ID_UNKNOWN_.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
      default: continue; // Omit gaps.
      case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric message id.
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Only GS, GS_DONE and SYSMSG take an operation argument; anything else
  // must be closed here.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic operation name, from the table matching the message type.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric operation id.
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
2277
/// Parse the operand of s_sendmsg: either a bare 16-bit immediate or a
/// sendmsg(...) construct, which is validated and packed into the immediate
/// encoding (message id, operation id and stream id fields).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    // Validate the parsed fields and pack the encoding. The do/while(0) lets
    // validation bail out with a diagnostic while still creating an immediate
    // operand below (avoids cascaded error messages).
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (0);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2356
// True iff this operand is an immediate produced by parseSendMsgOp.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
2360
Tom Stellard45bb48e2015-06-13 03:28:10 +00002361//===----------------------------------------------------------------------===//
2362// sopp branch targets
2363//===----------------------------------------------------------------------===//
2364
2365AMDGPUAsmParser::OperandMatchResultTy
2366AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2367 SMLoc S = Parser.getTok().getLoc();
2368
2369 switch (getLexer().getKind()) {
2370 default: return MatchOperand_ParseFail;
2371 case AsmToken::Integer: {
2372 int64_t Imm;
2373 if (getParser().parseAbsoluteExpression(Imm))
2374 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002375 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002376 return MatchOperand_Success;
2377 }
2378
2379 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002380 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00002381 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2382 Parser.getTok().getString()), getContext()), S));
2383 Parser.Lex();
2384 return MatchOperand_Success;
2385 }
2386}
2387
2388//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002389// mubuf
2390//===----------------------------------------------------------------------===//
2391
// Factory helpers: build a zero-valued immediate operand (with no source
// location) for each optional MUBUF modifier that was omitted from the text.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
2403
// Convert parsed MUBUF operands into an MCInst. Registers and the plain
// immediate (soffset) are appended in parse order; typed immediate modifiers
// are collected by type and appended afterwards in the fixed encoding order
// (offset, glc unless atomic, slc, tfe). For atomic-with-return forms the
// leading $vdata_in operand is duplicated to serve as $vdata.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  // A return-variant atomic must also be flagged atomic.
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2449
2450//===----------------------------------------------------------------------===//
2451// mimg
2452//===----------------------------------------------------------------------===//
2453
Sam Kolton1bdcef72016-05-23 09:59:02 +00002454void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2455 unsigned I = 1;
2456 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2457 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2458 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2459 }
2460
2461 OptionalImmIndexMap OptionalIdx;
2462
2463 for (unsigned E = Operands.size(); I != E; ++I) {
2464 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2465
2466 // Add the register arguments
2467 if (Op.isRegOrImm()) {
2468 Op.addRegOrImmOperands(Inst, 1);
2469 continue;
2470 } else if (Op.isImmModifier()) {
2471 OptionalIdx[Op.getImmTy()] = I;
2472 } else {
2473 assert(false);
2474 }
2475 }
2476
2477 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2478 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2479 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2480 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2481 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2482 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2483 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2484 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2485}
2486
2487void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2488 unsigned I = 1;
2489 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2490 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2491 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2492 }
2493
2494 // Add src, same as dst
2495 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2496
2497 OptionalImmIndexMap OptionalIdx;
2498
2499 for (unsigned E = Operands.size(); I != E; ++I) {
2500 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2501
2502 // Add the register arguments
2503 if (Op.isRegOrImm()) {
2504 Op.addRegOrImmOperands(Inst, 1);
2505 continue;
2506 } else if (Op.isImmModifier()) {
2507 OptionalIdx[Op.getImmTy()] = I;
2508 } else {
2509 assert(false);
2510 }
2511 }
2512
2513 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2514 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2515 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2516 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2517 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2518 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2519 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2520 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2521}
2522
// Factory helpers: build a zero-valued immediate operand (with no source
// location) for each optional MIMG modifier that was omitted from the text.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
2542
Tom Stellard45bb48e2015-06-13 03:28:10 +00002543//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002544// smrd
2545//===----------------------------------------------------------------------===//
2546
// An SMRD offset immediate: must fit in the 8-bit encoding.
bool AMDGPUOperand::isSMRDOffset() const {

  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}
2553
2554bool AMDGPUOperand::isSMRDLiteralOffset() const {
2555 // 32-bit literals are only supported on CI and we only want to use them
2556 // when the offset is > 8-bits.
2557 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2558}
2559
// Factory helpers: zero-valued default offset operands for SMRD forms; both
// the 8-bit and the 32-bit literal variant use ImmTyOffset.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2567
Tom Stellard217361c2015-08-06 19:28:38 +00002568//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002569// vop3
2570//===----------------------------------------------------------------------===//
2571
// Convert an omod "mul:N" literal (1, 2 or 4) in place to its encoded value
// (0, 1 or 2). Returns false for any other literal, leaving it untouched.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
2579
// Convert an omod "div:N" literal in place to its encoded value: 1 -> 0,
// 2 -> 3. Returns false for any other literal, leaving it untouched.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2593
// Convert a DPP bound_ctrl literal in place to its encoded value: 0 -> 1,
// -1 -> 0. Returns false for any other literal, leaving it untouched.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2604
// Note: the order in this table matches the order of operands in AsmString.
// Each entry is {name, immediate type, is-a-bare-bit (no ":value" suffix),
// optional converter applied to the parsed value}.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
  {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002632
Sam Kolton11de3702016-05-24 12:38:33 +00002633AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
2634 OperandMatchResultTy res;
2635 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2636 // try to parse any optional operand here
2637 if (Op.IsBit) {
2638 res = parseNamedBit(Op.Name, Operands, Op.Type);
2639 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2640 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002641 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
2642 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
2643 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
2644 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00002645 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2646 res = parseSDWADstUnused(Operands);
2647 } else {
2648 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2649 }
2650 if (res != MatchOperand_NoMatch) {
2651 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002652 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002653 }
2654 return MatchOperand_NoMatch;
2655}
2656
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002657AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2658{
2659 StringRef Name = Parser.getTok().getString();
2660 if (Name == "mul") {
Sam Kolton11de3702016-05-24 12:38:33 +00002661 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002662 } else if (Name == "div") {
Sam Kolton11de3702016-05-24 12:38:33 +00002663 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002664 } else {
2665 return MatchOperand_NoMatch;
2666 }
2667}
2668
Tom Stellarda90b9522016-02-11 03:28:15 +00002669void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2670 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002671 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002672 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002673 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2674 }
2675 for (unsigned E = Operands.size(); I != E; ++I)
2676 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2677}
2678
2679void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002680 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2681 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002682 cvtVOP3(Inst, Operands);
2683 } else {
2684 cvtId(Inst, Operands);
2685 }
2686}
2687
Sam Koltona3ec5c12016-10-07 14:46:06 +00002688static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
2689 // 1. This operand is input modifiers
2690 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
2691 // 2. This is not last operand
2692 && Desc.NumOperands > (OpNum + 1)
2693 // 3. Next operand is register class
2694 && Desc.OpInfo[OpNum + 1].RegClass != -1
2695 // 4. Next register is not tied to any other operand
2696 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
2697}
2698
// Convert parsed VOP3 operands into an MCInst: defs first, then each source
// (with its FP input-modifier operand where the descriptor calls for one),
// then the optional clamp and omod immediates. v_mac_f32 needs a fix-up for
// its tied src2 operand, inserted at the end.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Emits two MCInst operands: the modifier bits and the value itself.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_f32:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(AMDGPU::V_MAC_F32_e64, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
2734
Sam Koltondfa29f72016-03-09 12:29:31 +00002735//===----------------------------------------------------------------------===//
2736// dpp
2737//===----------------------------------------------------------------------===//
2738
2739bool AMDGPUOperand::isDPPCtrl() const {
2740 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2741 if (result) {
2742 int64_t Imm = getImm();
2743 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2744 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2745 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2746 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2747 (Imm == 0x130) ||
2748 (Imm == 0x134) ||
2749 (Imm == 0x138) ||
2750 (Imm == 0x13c) ||
2751 (Imm == 0x140) ||
2752 (Imm == 0x141) ||
2753 (Imm == 0x142) ||
2754 (Imm == 0x143);
2755 }
2756 return false;
2757}
2758
// Parse a dpp_ctrl operand into its 9-bit encoding. Accepted forms:
//   row_mirror / row_half_mirror              (bare keywords)
//   quad_perm:[a,b,c,d]  with 0 <= a..d <= 3  (packed 2 bits each)
//   row_shl:/row_shr:/row_ror:N (1..15), wave_shl:/rol:/shr:/ror:1,
//   row_bcast:15 or row_bcast:31              (prefix:value forms)
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane selector occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors occupy 2 bits each, shifted left in turn.
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Fold the prefix and the (range-checked) value into one encoding.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2858
// Factory helpers for omitted DPP modifiers: row_mask and bank_mask default
// to 0xf, bound_ctrl defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}
2870
// Convert parsed DPP operands into an MCInst: defs, sources (with FP input
// modifiers where the descriptor calls for them), the dpp_ctrl immediate,
// then row_mask/bank_mask/bound_ctrl with their defaults. v_mac_f32 needs a
// fix-up for its tied src2 operand, inserted at the end.
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Emits two MCInst operands: the modifier bits and the value itself.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);

  // special case v_mac_f32:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002907
Sam Kolton3025e7f2016-04-26 13:33:56 +00002908//===----------------------------------------------------------------------===//
2909// sdwa
2910//===----------------------------------------------------------------------===//
2911
// Parse one SDWA select operand of the form "<Prefix>:<NAME>" (for example
// "dst_sel:BYTE_0") into the matching SdwaSel enumerator. 'Type' selects
// which of dst_sel/src0_sel/src1_sel this immediate represents.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
                              AMDGPUOperand::ImmTy Type) {
  using namespace llvm::AMDGPU::SDWA;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Value;
  AMDGPUAsmParser::OperandMatchResultTy res;

  res = parseStringWithPrefix(Prefix, Value);
  if (res != MatchOperand_Success) {
    return res;
  }

  int64_t Int;
  Int = StringSwitch<int64_t>(Value)
        .Case("BYTE_0", SdwaSel::BYTE_0)
        .Case("BYTE_1", SdwaSel::BYTE_1)
        .Case("BYTE_2", SdwaSel::BYTE_2)
        .Case("BYTE_3", SdwaSel::BYTE_3)
        .Case("WORD_0", SdwaSel::WORD_0)
        .Case("WORD_1", SdwaSel::WORD_1)
        .Case("DWORD", SdwaSel::DWORD)
        .Default(0xffffffff);
  Parser.Lex(); // eat last token

  // 0xffffffff is the StringSwitch miss sentinel, not a valid select value.
  if (Int == 0xffffffff) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
  return MatchOperand_Success;
}
2945
Matt Arsenault37fefd62016-06-10 02:18:02 +00002946AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00002947AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002948 using namespace llvm::AMDGPU::SDWA;
2949
Sam Kolton3025e7f2016-04-26 13:33:56 +00002950 SMLoc S = Parser.getTok().getLoc();
2951 StringRef Value;
2952 AMDGPUAsmParser::OperandMatchResultTy res;
2953
2954 res = parseStringWithPrefix("dst_unused", Value);
2955 if (res != MatchOperand_Success) {
2956 return res;
2957 }
2958
2959 int64_t Int;
2960 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00002961 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
2962 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
2963 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00002964 .Default(0xffffffff);
2965 Parser.Lex(); // eat last token
2966
2967 if (Int == 0xffffffff) {
2968 return MatchOperand_ParseFail;
2969 }
2970
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002971 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00002972 return MatchOperand_Success;
2973}
2974
// Matcher callback for SDWA VOP1 instructions: defers to cvtSDWA, which
// appends the VOP1 set of optional operands (dst_sel, dst_unused, src0_sel).
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
2978
// Matcher callback for SDWA VOP2 instructions: defers to cvtSDWA, which
// appends the VOP2 set of optional operands (dst_sel, dst_unused, src0_sel,
// src1_sel).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
2982
// Matcher callback for SDWA VOPC instructions: defers to cvtSDWA, which
// appends only src0_sel/src1_sel (VOPC has no dst_sel/dst_unused operands).
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
}
2986
/// Convert parsed operands of an SDWA instruction into \p Inst.
///
/// \p BasicInstType selects which family of optional SDWA operands is
/// appended (VOP1/VOP2/VOPC differ); any other value reaches
/// llvm_unreachable below.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  // Records, per optional-immediate type, its index in Operands so the
  // optional operands can be emitted in encoding order afterwards.
  OptionalImmIndexMap OptionalIdx;

  // Operands[0] is the mnemonic token; real operands start at index 1.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Emit all destination registers first, as the descriptor expects.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (BasicInstType == SIInstrFlags::VOPC &&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC sdwa use "vcc" token as dst. Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source operand with abs/neg modifiers: emitted as two MCOperands.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // clamp is common to all SDWA forms; default 0 (no clamping).
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa) {
    // V_NOP_sdwa has no optional sdwa arguments
    // NOTE(review): the defaults below are raw encodings — 6 presumably
    // SdwaSel::DWORD and 2 presumably DstUnused::UNUSED_PRESERVE; confirm
    // against the SDWA enums in SIDefines.h.
    switch (BasicInstType) {
    case SIInstrFlags::VOP1: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      break;
    }
    case SIInstrFlags::VOP2: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;
    }
    case SIInstrFlags::VOPC: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;
    }
    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_f32:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }

}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003052
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  // Register this parser with both AMDGPU target entries.
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
3058
3059#define GET_REGISTER_MATCHER
3060#define GET_MATCHER_IMPLEMENTATION
3061#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00003062
3063
3064// This fuction should be defined after auto-generated include so that we have
3065// MatchClassKind enum defined
3066unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
3067 unsigned Kind) {
3068 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00003069 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00003070 // operand. This method checks if we are given immediate operand but expect to
3071 // get corresponding token.
3072 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
3073 switch (Kind) {
3074 case MCK_addr64:
3075 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
3076 case MCK_gds:
3077 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
3078 case MCK_glc:
3079 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
3080 case MCK_idxen:
3081 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
3082 case MCK_offen:
3083 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003084 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00003085 // When operands have expression values, they will return true for isToken,
3086 // because it is not possible to distinguish between a token and an
3087 // expression at parse time. MatchInstructionImpl() will always try to
3088 // match an operand as a token, when isToken returns true, and when the
3089 // name of the expression is not a valid token, the match will fail,
3090 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003091 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
3092 case MCK_SSrcF32:
3093 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00003094 case MCK_SoppBrTarget:
3095 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00003096 default: return Match_InvalidOperand;
3097 }
3098}