blob: dcee28e4382d0991d41ef835cfa3f4816a76b325 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000018#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000019#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000021#include "llvm/ADT/StringSwitch.h"
22#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000023#include "llvm/CodeGen/MachineValueType.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/MC/MCContext.h"
25#include "llvm/MC/MCExpr.h"
26#include "llvm/MC/MCInst.h"
27#include "llvm/MC/MCInstrInfo.h"
28#include "llvm/MC/MCParser/MCAsmLexer.h"
29#include "llvm/MC/MCParser/MCAsmParser.h"
30#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000031#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000032#include "llvm/MC/MCRegisterInfo.h"
33#include "llvm/MC/MCStreamer.h"
34#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000036#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000037#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/Support/SourceMgr.h"
39#include "llvm/Support/TargetRegistry.h"
40#include "llvm/Support/raw_ostream.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000041#include "llvm/Support/MathExtras.h"
Artem Tamazovebe71ce2016-05-06 17:48:48 +000042
Tom Stellard45bb48e2015-06-13 03:28:10 +000043using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000044using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000045
46namespace {
47
Sam Kolton1eeb11b2016-09-09 14:44:04 +000048class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000049struct OptionalOperand;
50
// Kind of register name recognized by the parser: vector GPR, scalar GPR,
// trap-handler temporary (ttmp), or a special register (e.g. vcc/exec).
// NOTE(review): member semantics inferred from names; confirm against
// ParseAMDGPURegister.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
52
Sam Kolton1eeb11b2016-09-09 14:44:04 +000053//===----------------------------------------------------------------------===//
54// Operand
55//===----------------------------------------------------------------------===//
56
Tom Stellard45bb48e2015-06-13 03:28:10 +000057class AMDGPUOperand : public MCParsedAsmOperand {
58 enum KindTy {
59 Token,
60 Immediate,
61 Register,
62 Expression
63 } Kind;
64
65 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000066 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000067
68public:
Sam Kolton1eeb11b2016-09-09 14:44:04 +000069 AMDGPUOperand(enum KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
70 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000071
Sam Kolton5f10a132016-05-06 11:31:17 +000072 typedef std::unique_ptr<AMDGPUOperand> Ptr;
73
Sam Kolton945231a2016-06-10 09:57:59 +000074 struct Modifiers {
75 bool Abs;
76 bool Neg;
77 bool Sext;
78
79 bool hasFPModifiers() const { return Abs || Neg; }
80 bool hasIntModifiers() const { return Sext; }
81 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
82
83 int64_t getFPModifiersOperand() const {
84 int64_t Operand = 0;
85 Operand |= Abs ? SISrcMods::ABS : 0;
86 Operand |= Neg ? SISrcMods::NEG : 0;
87 return Operand;
88 }
89
90 int64_t getIntModifiersOperand() const {
91 int64_t Operand = 0;
92 Operand |= Sext ? SISrcMods::SEXT : 0;
93 return Operand;
94 }
95
96 int64_t getModifiersOperand() const {
97 assert(!(hasFPModifiers() && hasIntModifiers())
98 && "fp and int modifiers should not be used simultaneously");
99 if (hasFPModifiers()) {
100 return getFPModifiersOperand();
101 } else if (hasIntModifiers()) {
102 return getIntModifiersOperand();
103 } else {
104 return 0;
105 }
106 }
107
108 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
109 };
110
Tom Stellard45bb48e2015-06-13 03:28:10 +0000111 enum ImmTy {
112 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000113 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000114 ImmTyOffen,
115 ImmTyIdxen,
116 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000117 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000118 ImmTyOffset0,
119 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000120 ImmTyGLC,
121 ImmTySLC,
122 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000123 ImmTyClampSI,
124 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000125 ImmTyDppCtrl,
126 ImmTyDppRowMask,
127 ImmTyDppBankMask,
128 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000129 ImmTySdwaDstSel,
130 ImmTySdwaSrc0Sel,
131 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000132 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000133 ImmTyDMask,
134 ImmTyUNorm,
135 ImmTyDA,
136 ImmTyR128,
137 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +0000138 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000139 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000140 };
141
142 struct TokOp {
143 const char *Data;
144 unsigned Length;
145 };
146
147 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000148 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000149 ImmTy Type;
150 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000151 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000152 };
153
154 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000155 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000156 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000157 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000158 };
159
160 union {
161 TokOp Tok;
162 ImmOp Imm;
163 RegOp Reg;
164 const MCExpr *Expr;
165 };
166
Tom Stellard45bb48e2015-06-13 03:28:10 +0000167 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000168 if (Kind == Token)
169 return true;
170
171 if (Kind != Expression || !Expr)
172 return false;
173
174 // When parsing operands, we can't always tell if something was meant to be
175 // a token, like 'gds', or an expression that references a global variable.
176 // In this case, we assume the string is an expression, and if we need to
177 // interpret is a token, then we treat the symbol name as the token.
178 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000179 }
180
181 bool isImm() const override {
182 return Kind == Immediate;
183 }
184
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000185 bool isInlinableImm(MVT type) const;
186 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000187
Tom Stellard45bb48e2015-06-13 03:28:10 +0000188 bool isRegKind() const {
189 return Kind == Register;
190 }
191
192 bool isReg() const override {
Sam Kolton945231a2016-06-10 09:57:59 +0000193 return isRegKind() && !Reg.Mods.hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000194 }
195
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000196 bool isRegOrImmWithInputMods(MVT type) const {
197 return isRegKind() || isInlinableImm(type);
198 }
199
200 bool isRegOrImmWithInt32InputMods() const {
201 return isRegOrImmWithInputMods(MVT::i32);
202 }
203
204 bool isRegOrImmWithInt64InputMods() const {
205 return isRegOrImmWithInputMods(MVT::i64);
206 }
207
208 bool isRegOrImmWithFP32InputMods() const {
209 return isRegOrImmWithInputMods(MVT::f32);
210 }
211
212 bool isRegOrImmWithFP64InputMods() const {
213 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000214 }
215
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000216 bool isImmTy(ImmTy ImmT) const {
217 return isImm() && Imm.Type == ImmT;
218 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000219
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000220 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000221 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000222 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000223
Sam Kolton945231a2016-06-10 09:57:59 +0000224 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
225 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
226 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000227 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
228 bool isDA() const { return isImmTy(ImmTyDA); }
229 bool isR128() const { return isImmTy(ImmTyUNorm); }
230 bool isLWE() const { return isImmTy(ImmTyLWE); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000231 bool isOffen() const { return isImmTy(ImmTyOffen); }
232 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
233 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
234 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
235 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
236 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000237 bool isGDS() const { return isImmTy(ImmTyGDS); }
238 bool isGLC() const { return isImmTy(ImmTyGLC); }
239 bool isSLC() const { return isImmTy(ImmTySLC); }
240 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000241 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
242 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
243 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
244 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
245 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
246 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
247 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000248
Sam Kolton945231a2016-06-10 09:57:59 +0000249 bool isMod() const {
250 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000251 }
252
253 bool isRegOrImm() const {
254 return isReg() || isImm();
255 }
256
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000257 bool isRegClass(unsigned RCID) const;
258
259 bool isSCSrcB32() const {
260 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000261 }
262
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000263 bool isSCSrcB64() const {
264 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000265 }
266
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000267 bool isSCSrcF32() const {
268 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000269 }
270
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000271 bool isSCSrcF64() const {
272 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000273 }
274
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000275 bool isSSrcB32() const {
276 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
277 }
278
279 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000280 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
281 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000282 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000283 }
284
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000285 bool isSSrcF32() const {
286 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000287 }
288
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000289 bool isSSrcF64() const {
290 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000291 }
292
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000293 bool isVCSrcB32() const {
294 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000295 }
296
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000297 bool isVCSrcB64() const {
298 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
299 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000300
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000301 bool isVCSrcF32() const {
302 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
303 }
304
305 bool isVCSrcF64() const {
306 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
307 }
308
309 bool isVSrcB32() const {
310 return isVCSrcF32() || isLiteralImm(MVT::i32);
311 }
312
313 bool isVSrcB64() const {
314 return isVCSrcF64() || isLiteralImm(MVT::i64);
315 }
316
317 bool isVSrcF32() const {
318 return isVCSrcF32() || isLiteralImm(MVT::f32);
319 }
320
321 bool isVSrcF64() const {
322 return isVCSrcF64() || isLiteralImm(MVT::f64);
323 }
324
325 bool isKImmFP32() const {
326 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000327 }
328
329 bool isMem() const override {
330 return false;
331 }
332
333 bool isExpr() const {
334 return Kind == Expression;
335 }
336
337 bool isSoppBrTarget() const {
338 return isExpr() || isImm();
339 }
340
Sam Kolton945231a2016-06-10 09:57:59 +0000341 bool isSWaitCnt() const;
342 bool isHwreg() const;
343 bool isSendMsg() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000344 bool isSMRDOffset8() const;
345 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000346 bool isSMRDLiteralOffset() const;
347 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000348 bool isGPRIdxMode() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000349
Tom Stellard89049702016-06-15 02:54:14 +0000350 StringRef getExpressionAsToken() const {
351 assert(isExpr());
352 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
353 return S->getSymbol().getName();
354 }
355
356
Sam Kolton945231a2016-06-10 09:57:59 +0000357 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000358 assert(isToken());
359
360 if (Kind == Expression)
361 return getExpressionAsToken();
362
Sam Kolton945231a2016-06-10 09:57:59 +0000363 return StringRef(Tok.Data, Tok.Length);
364 }
365
366 int64_t getImm() const {
367 assert(isImm());
368 return Imm.Val;
369 }
370
371 enum ImmTy getImmTy() const {
372 assert(isImm());
373 return Imm.Type;
374 }
375
376 unsigned getReg() const override {
377 return Reg.RegNo;
378 }
379
Tom Stellard45bb48e2015-06-13 03:28:10 +0000380 SMLoc getStartLoc() const override {
381 return StartLoc;
382 }
383
Peter Collingbourne0da86302016-10-10 22:49:37 +0000384 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000385 return EndLoc;
386 }
387
Sam Kolton945231a2016-06-10 09:57:59 +0000388 Modifiers getModifiers() const {
389 assert(isRegKind() || isImmTy(ImmTyNone));
390 return isRegKind() ? Reg.Mods : Imm.Mods;
391 }
392
393 void setModifiers(Modifiers Mods) {
394 assert(isRegKind() || isImmTy(ImmTyNone));
395 if (isRegKind())
396 Reg.Mods = Mods;
397 else
398 Imm.Mods = Mods;
399 }
400
401 bool hasModifiers() const {
402 return getModifiers().hasModifiers();
403 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000404
Sam Kolton945231a2016-06-10 09:57:59 +0000405 bool hasFPModifiers() const {
406 return getModifiers().hasFPModifiers();
407 }
408
409 bool hasIntModifiers() const {
410 return getModifiers().hasIntModifiers();
411 }
412
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000413 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000414
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000415 void addLiteralImmOperand(MCInst &Inst, int64_t Val) const;
416
417 void addKImmFP32Operands(MCInst &Inst, unsigned N) const;
418
419 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000420
421 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
422 if (isRegKind())
423 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000424 else if (isExpr())
425 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000426 else
427 addImmOperands(Inst, N);
428 }
429
430 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
431 Modifiers Mods = getModifiers();
432 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
433 if (isRegKind()) {
434 addRegOperands(Inst, N);
435 } else {
436 addImmOperands(Inst, N, false);
437 }
438 }
439
440 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
441 assert(!hasIntModifiers());
442 addRegOrImmWithInputModsOperands(Inst, N);
443 }
444
445 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
446 assert(!hasFPModifiers());
447 addRegOrImmWithInputModsOperands(Inst, N);
448 }
449
450 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
451 if (isImm())
452 addImmOperands(Inst, N);
453 else {
454 assert(isExpr());
455 Inst.addOperand(MCOperand::createExpr(Expr));
456 }
457 }
458
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000459 void printImmTy(raw_ostream& OS, ImmTy Type) const {
460 switch (Type) {
461 case ImmTyNone: OS << "None"; break;
462 case ImmTyGDS: OS << "GDS"; break;
463 case ImmTyOffen: OS << "Offen"; break;
464 case ImmTyIdxen: OS << "Idxen"; break;
465 case ImmTyAddr64: OS << "Addr64"; break;
466 case ImmTyOffset: OS << "Offset"; break;
467 case ImmTyOffset0: OS << "Offset0"; break;
468 case ImmTyOffset1: OS << "Offset1"; break;
469 case ImmTyGLC: OS << "GLC"; break;
470 case ImmTySLC: OS << "SLC"; break;
471 case ImmTyTFE: OS << "TFE"; break;
472 case ImmTyClampSI: OS << "ClampSI"; break;
473 case ImmTyOModSI: OS << "OModSI"; break;
474 case ImmTyDppCtrl: OS << "DppCtrl"; break;
475 case ImmTyDppRowMask: OS << "DppRowMask"; break;
476 case ImmTyDppBankMask: OS << "DppBankMask"; break;
477 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000478 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
479 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
480 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000481 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
482 case ImmTyDMask: OS << "DMask"; break;
483 case ImmTyUNorm: OS << "UNorm"; break;
484 case ImmTyDA: OS << "DA"; break;
485 case ImmTyR128: OS << "R128"; break;
486 case ImmTyLWE: OS << "LWE"; break;
487 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000488 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000489 }
490 }
491
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000492 void print(raw_ostream &OS) const override {
493 switch (Kind) {
494 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000495 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000496 break;
497 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000498 OS << '<' << getImm();
499 if (getImmTy() != ImmTyNone) {
500 OS << " type: "; printImmTy(OS, getImmTy());
501 }
Sam Kolton945231a2016-06-10 09:57:59 +0000502 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000503 break;
504 case Token:
505 OS << '\'' << getToken() << '\'';
506 break;
507 case Expression:
508 OS << "<expr " << *Expr << '>';
509 break;
510 }
511 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000512
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000513 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
514 int64_t Val, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000515 enum ImmTy Type = ImmTyNone,
516 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000517 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000518 Op->Imm.Val = Val;
519 Op->Imm.IsFPImm = IsFPImm;
520 Op->Imm.Type = Type;
Sam Kolton945231a2016-06-10 09:57:59 +0000521 Op->Imm.Mods = {false, false, false};
Tom Stellard45bb48e2015-06-13 03:28:10 +0000522 Op->StartLoc = Loc;
523 Op->EndLoc = Loc;
524 return Op;
525 }
526
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000527 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
528 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000529 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000530 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000531 Res->Tok.Data = Str.data();
532 Res->Tok.Length = Str.size();
533 Res->StartLoc = Loc;
534 Res->EndLoc = Loc;
535 return Res;
536 }
537
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000538 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
539 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000540 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000541 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000542 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000543 Op->Reg.RegNo = RegNo;
Sam Kolton945231a2016-06-10 09:57:59 +0000544 Op->Reg.Mods = {false, false, false};
Tom Stellard45bb48e2015-06-13 03:28:10 +0000545 Op->Reg.IsForcedVOP3 = ForceVOP3;
546 Op->StartLoc = S;
547 Op->EndLoc = E;
548 return Op;
549 }
550
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000551 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
552 const class MCExpr *Expr, SMLoc S) {
553 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000554 Op->Expr = Expr;
555 Op->StartLoc = S;
556 Op->EndLoc = S;
557 return Op;
558 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000559};
560
Sam Kolton945231a2016-06-10 09:57:59 +0000561raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
562 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
563 return OS;
564}
565
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000566//===----------------------------------------------------------------------===//
567// AsmParser
568//===----------------------------------------------------------------------===//
569
Tom Stellard45bb48e2015-06-13 03:28:10 +0000570class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000571 const MCInstrInfo &MII;
572 MCAsmParser &Parser;
573
574 unsigned ForcedEncodingSize;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000575 bool ForcedDPP;
576 bool ForcedSDWA;
Matt Arsenault68802d32015-11-05 03:11:27 +0000577
Tom Stellard45bb48e2015-06-13 03:28:10 +0000578 /// @name Auto-generated Match Functions
579 /// {
580
581#define GET_ASSEMBLER_HEADER
582#include "AMDGPUGenAsmMatcher.inc"
583
584 /// }
585
Tom Stellard347ac792015-06-26 21:15:07 +0000586private:
587 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
588 bool ParseDirectiveHSACodeObjectVersion();
589 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000590 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
591 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000592 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000593 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000594 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000595 bool ParseDirectiveAMDGPUHsaModuleGlobal();
596 bool ParseDirectiveAMDGPUHsaProgramGlobal();
597 bool ParseSectionDirectiveHSADataGlobalAgent();
598 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000599 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000600 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
601 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000602 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000603
Tom Stellard45bb48e2015-06-13 03:28:10 +0000604public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000605 enum AMDGPUMatchResultTy {
606 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
607 };
608
Akira Hatanakab11ef082015-11-14 06:35:56 +0000609 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000610 const MCInstrInfo &MII,
611 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000612 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Sam Kolton05ef1c92016-06-03 10:27:37 +0000613 ForcedEncodingSize(0),
614 ForcedDPP(false),
615 ForcedSDWA(false) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000616 MCAsmParserExtension::Initialize(Parser);
617
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000618 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000619 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000620 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000621 }
622
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000623 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Artem Tamazov17091362016-06-14 15:03:59 +0000624
625 {
626 // TODO: make those pre-defined variables read-only.
627 // Currently there is none suitable machinery in the core llvm-mc for this.
628 // MCSymbol::isRedefinable is intended for another purpose, and
629 // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
630 AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
631 MCContext &Ctx = getContext();
632 MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
633 Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
634 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
635 Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
636 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
637 Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
638 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000639 }
640
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000641 bool isSI() const {
642 return AMDGPU::isSI(getSTI());
643 }
644
645 bool isCI() const {
646 return AMDGPU::isCI(getSTI());
647 }
648
649 bool isVI() const {
650 return AMDGPU::isVI(getSTI());
651 }
652
653 bool hasSGPR102_SGPR103() const {
654 return !isVI();
655 }
656
Tom Stellard347ac792015-06-26 21:15:07 +0000657 AMDGPUTargetStreamer &getTargetStreamer() {
658 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
659 return static_cast<AMDGPUTargetStreamer &>(TS);
660 }
Matt Arsenault37fefd62016-06-10 02:18:02 +0000661
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000662 const MCRegisterInfo *getMRI() const {
663 // We need this const_cast because for some reason getContext() is not const
664 // in MCAsmParser.
665 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
666 }
667
668 const MCInstrInfo *getMII() const {
669 return &MII;
670 }
671
Sam Kolton05ef1c92016-06-03 10:27:37 +0000672 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
673 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
674 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
Tom Stellard347ac792015-06-26 21:15:07 +0000675
Sam Kolton05ef1c92016-06-03 10:27:37 +0000676 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
677 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
678 bool isForcedDPP() const { return ForcedDPP; }
679 bool isForcedSDWA() const { return ForcedSDWA; }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000680
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000681 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000682 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
683 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +0000684 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
685 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000686 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
687 OperandVector &Operands, MCStreamer &Out,
688 uint64_t &ErrorInfo,
689 bool MatchingInlineAsm) override;
690 bool ParseDirective(AsmToken DirectiveID) override;
691 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000692 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000693 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
694 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000695 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000696
Sam Kolton11de3702016-05-24 12:38:33 +0000697 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000698 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
699 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000700 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000701 bool (*ConvertResult)(int64_t&) = 0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000702 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +0000703 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000704 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000705
Sam Kolton1bdcef72016-05-23 09:59:02 +0000706 OperandMatchResultTy parseImm(OperandVector &Operands);
707 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000708 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
709 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +0000710
Tom Stellard45bb48e2015-06-13 03:28:10 +0000711 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
712 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000713
714 bool parseCnt(int64_t &IntVal);
715 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000716 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000717
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000718private:
  // Helper for parsing sendmsg/hwreg constructs: holds one parsed field.
  struct OperandInfoTy {
    int64_t Id;       // Field value; numeric literal or resolved from a name.
    bool IsSymbolic;  // Presumably set when the field was written symbolically
                      // rather than as a number — confirm in parse*Construct.
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };
Sam Kolton11de3702016-05-24 12:38:33 +0000724
Artem Tamazov6edc1352016-05-26 17:00:33 +0000725 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
726 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000727public:
Sam Kolton11de3702016-05-24 12:38:33 +0000728 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
729
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000730 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000731 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
732
  // MUBUF operand conversions share one implementation; the two flags are
  // (IsAtomic, IsAtomicReturn) forwarded to cvtMubufImpl.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000736 AMDGPUOperand::Ptr defaultGLC() const;
737 AMDGPUOperand::Ptr defaultSLC() const;
738 AMDGPUOperand::Ptr defaultTFE() const;
739
Sam Kolton5f10a132016-05-06 11:31:17 +0000740 AMDGPUOperand::Ptr defaultDMask() const;
741 AMDGPUOperand::Ptr defaultUNorm() const;
742 AMDGPUOperand::Ptr defaultDA() const;
743 AMDGPUOperand::Ptr defaultR128() const;
744 AMDGPUOperand::Ptr defaultLWE() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000745 AMDGPUOperand::Ptr defaultSMRDOffset8() const;
746 AMDGPUOperand::Ptr defaultSMRDOffset20() const;
Sam Kolton5f10a132016-05-06 11:31:17 +0000747 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +0000748
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000749 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
750
Tom Stellarda90b9522016-02-11 03:28:15 +0000751 void cvtId(MCInst &Inst, const OperandVector &Operands);
752 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000753 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000754
755 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000756 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000757
Sam Kolton11de3702016-05-24 12:38:33 +0000758 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000759 AMDGPUOperand::Ptr defaultRowMask() const;
760 AMDGPUOperand::Ptr defaultBankMask() const;
761 AMDGPUOperand::Ptr defaultBoundCtrl() const;
762 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000763
Sam Kolton05ef1c92016-06-03 10:27:37 +0000764 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
765 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000766 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000767 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
768 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +0000769 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
770 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
771 uint64_t BasicInstType);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000772};
773
// Table entry describing one optional instruction operand (e.g. an
// instruction-suffix flag): its assembly spelling, the immediate kind it
// produces, whether it is a plain bit flag, and an optional hook that
// post-processes the parsed value.
struct OptionalOperand {
  const char *Name;                // Assembly-source spelling of the operand.
  AMDGPUOperand::ImmTy Type;       // Immediate kind attached to the operand.
  bool IsBit;                      // True for single-bit flags (no value).
  bool (*ConvertResult)(int64_t&); // Value fixup; may be null (see the
                                   // `= 0` default in parseIntWithPrefix).
};
780
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000781}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000782
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000783//===----------------------------------------------------------------------===//
784// Operand
785//===----------------------------------------------------------------------===//
786
// Returns true if this immediate can be encoded as an inline constant for
// an operand of the given type, i.e. without emitting a literal dword.
// 64-bit expected operands check the raw 64-bit value; 32-bit expected
// operands first narrow (FP literals via a double->single conversion).
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val, AsmParser->isVI());
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      APFloat::opStatus status = FPLiteral.convert(APFloat::IEEEsingle,
                                                   APFloat::rmNearestTiesToEven,
                                                   &lost);
      // We allow precision lost but not overflow or underflow
      if (status != APFloat::opOK &&
          lost &&
          ((status & APFloat::opOverflow) != 0 ||
           (status & APFloat::opUnderflow) != 0)) {
        return false;
      }
      // Check if single precision literal is inlinable
      return AMDGPU::isInlinableLiteral32(
        static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->isVI());
    }
  } else { // We got int literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val, AsmParser->isVI());
    } else { // Expected 32-bit operand
      return AMDGPU::isInlinableLiteral32(
        static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
        AsmParser->isVI());
    }
  }
  // Unreachable: both branches above return.
  return false;
}
831
// Returns true if this immediate can be encoded as a literal dword for an
// operand of the given type. Complements isInlinableImm: this accepts
// values that need the extra literal encoding.
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64) { // Expected 64-bit fp operand
      // We would set the low 32 bits of the literal to zeroes, but we
      // accept such literals (see the warning in addLiteralImmOperand).
      return true;
    } else if (type == MVT::i64) { // Expected 64-bit int operand
      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them.
      return false;
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      APFloat::opStatus status = FPLiteral.convert(APFloat::IEEEsingle,
                                                   APFloat::rmNearestTiesToEven,
                                                   &lost);
      // We allow precision lost but not overflow or underflow
      if (status != APFloat::opOK &&
          lost &&
          ((status & APFloat::opOverflow) != 0 ||
           (status & APFloat::opUnderflow) != 0)) {
        return false;
      }
      return true;
    }
  } else { // We got int literal token
    APInt HiBits = Literal.getHiBits(32);
    if (HiBits == 0xffffffff &&
        (*Literal.getLoBits(32).getRawData() & 0x80000000) != 0) {
      // If high 32 bits aren't zeroes then they all should be ones and 32nd
      // bit should be set. So that this 64-bit literal is sign-extension of
      // 32-bit value.
      return true;
    } else if (HiBits == 0) {
      return true;
    }
  }
  return false;
}
878
879bool AMDGPUOperand::isRegClass(unsigned RCID) const {
880 return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
881}
882
883void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
884 int64_t Val = Imm.Val;
885 if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers() && Imm.Mods.Neg) {
886 // Apply modifiers to immediate value. Only negate can get here
887 if (Imm.IsFPImm) {
888 APFloat F(BitsToDouble(Val));
889 F.changeSign();
890 Val = F.bitcastToAPInt().getZExtValue();
891 } else {
892 Val = -Val;
893 }
894 }
895
896 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()), Inst.getNumOperands())) {
897 addLiteralImmOperand(Inst, Val);
898 } else {
899 Inst.addOperand(MCOperand::createImm(Val));
900 }
901}
902
// Append Val to Inst as an inline or literal constant. The caller must
// have established (via isSISrcOperand) that this operand position takes
// a literal encoding; the value is narrowed or split to match the operand
// size recorded in the instruction description.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  APInt Literal(64, Val);
  auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), InstDesc, OpNum); // expected operand size

  if (Imm.IsFPImm) { // We got fp literal token
    if (OpSize == 8) { // Expected 64-bit operand
      // Check if literal is inlinable
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(), AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
      } else if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
                          "Can't encode literal as exact 64-bit"
                          " floating-point operand. Low 32-bits will be"
                          " set to zero");
        }
        // Only the high dword is encodable; shift it down for the literal.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
      } else {
        // We don't allow fp literals in 64-bit integer instructions. It is
        // unclear how we should encode them. This case should be checked earlier
        // in predicate methods (isLiteralImm())
        llvm_unreachable("fp literal in 64-bit integer instruction.");
      }
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()
      Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
    }
  } else { // We got int literal token
    if (OpSize == 8) { // Expected 64-bit operand
      auto LiteralVal = Literal.getZExtValue();
      if (AMDGPU::isInlinableLiteral64(LiteralVal, AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    } else { // Expected 32-bit operand
      auto LiteralVal = static_cast<int32_t>(Literal.getLoBits(32).getZExtValue());
      if (AMDGPU::isInlinableLiteral32(LiteralVal, AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    }
    // Not inlinable: fall through and emit the low 32 bits as a literal.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
  }
}
958
959void AMDGPUOperand::addKImmFP32Operands(MCInst &Inst, unsigned N) const {
960 APInt Literal(64, Imm.Val);
961 if (Imm.IsFPImm) { // We got fp literal
962 bool lost;
963 APFloat FPLiteral(APFloat::IEEEdouble, Literal);
964 FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
965 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
966 } else { // We got int literal token
967 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
968 }
969}
970
971void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
972 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
973}
974
975//===----------------------------------------------------------------------===//
976// AsmParser
977//===----------------------------------------------------------------------===//
978
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000979static int getRegClass(RegisterKind Is, unsigned RegWidth) {
980 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000981 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000982 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000983 case 1: return AMDGPU::VGPR_32RegClassID;
984 case 2: return AMDGPU::VReg_64RegClassID;
985 case 3: return AMDGPU::VReg_96RegClassID;
986 case 4: return AMDGPU::VReg_128RegClassID;
987 case 8: return AMDGPU::VReg_256RegClassID;
988 case 16: return AMDGPU::VReg_512RegClassID;
989 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000990 } else if (Is == IS_TTMP) {
991 switch (RegWidth) {
992 default: return -1;
993 case 1: return AMDGPU::TTMP_32RegClassID;
994 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000995 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000996 }
997 } else if (Is == IS_SGPR) {
998 switch (RegWidth) {
999 default: return -1;
1000 case 1: return AMDGPU::SGPR_32RegClassID;
1001 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001002 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001003 case 8: return AMDGPU::SReg_256RegClassID;
1004 case 16: return AMDGPU::SReg_512RegClassID;
1005 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001006 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001007 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001008}
1009
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001010static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001011 return StringSwitch<unsigned>(RegName)
1012 .Case("exec", AMDGPU::EXEC)
1013 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001014 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001015 .Case("m0", AMDGPU::M0)
1016 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001017 .Case("tba", AMDGPU::TBA)
1018 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001019 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1020 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001021 .Case("vcc_lo", AMDGPU::VCC_LO)
1022 .Case("vcc_hi", AMDGPU::VCC_HI)
1023 .Case("exec_lo", AMDGPU::EXEC_LO)
1024 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001025 .Case("tma_lo", AMDGPU::TMA_LO)
1026 .Case("tma_hi", AMDGPU::TMA_HI)
1027 .Case("tba_lo", AMDGPU::TBA_LO)
1028 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001029 .Default(0);
1030}
1031
1032bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001033 auto R = parseRegister();
1034 if (!R) return true;
1035 assert(R->isReg());
1036 RegNo = R->getReg();
1037 StartLoc = R->getStartLoc();
1038 EndLoc = R->getEndLoc();
1039 return false;
1040}
1041
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001042bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
1043{
1044 switch (RegKind) {
1045 case IS_SPECIAL:
1046 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
1047 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
1048 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
1049 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
1050 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
1051 return false;
1052 case IS_VGPR:
1053 case IS_SGPR:
1054 case IS_TTMP:
1055 if (Reg1 != Reg + RegWidth) { return false; }
1056 RegWidth++;
1057 return true;
1058 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001059 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001060 }
1061}
1062
// Parse one register reference in any supported syntax and report its
// kind, MC register number, raw index, and width in dwords. Accepted
// forms: special names ("vcc", "exec_lo", ...), single registers (v0,
// s15, ttmp3), ranges (v[8:11], s[2:], v[4]), and bracketed lists of
// consecutive single registers ([s0,s1,s2,s3], handled recursively).
// Returns false on any syntax or validity error.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: 'v', 's', or "ttmp"; RegNumIndex is where the
      // numeric part of the name starts.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          // v[XX] is a one-register range.
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    // The first element seeds the group; each further element must be a
    // single register of the same kind, consecutive with the group so far.
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Post-process: translate (kind, index, width) into the final MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    // Convert the dword index into an index within the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Finally reject registers that the current subtarget does not have.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1188
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001189std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001190 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001191 SMLoc StartLoc = Tok.getLoc();
1192 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001193 RegisterKind RegKind;
1194 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001195
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001196 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
1197 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001198 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001199 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001200}
1201
// Parse an (optionally negated) integer or floating-point immediate and
// append it to Operands. Returns NoMatch when the next token is not a
// number and no '-' was consumed; ParseFail once tokens were consumed.
OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  // TODO: add syntactic sugar for 1/(2*PI)
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    // NOTE(review): IntVal is treated as the raw bit pattern of a double
    // here (BitsToDouble) — presumably the lexer encodes Real tokens that
    // way; confirm against AsmLexer before changing.
    APFloat F(BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    // The operand stores the double's bits, flagged as an FP immediate.
    Operands.push_back(
        AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    // A bare '-' with no number following is an error, not a no-match.
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}
1239
Alex Bradbury58eba092016-11-01 16:32:05 +00001240OperandMatchResultTy
Sam Kolton1bdcef72016-05-23 09:59:02 +00001241AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1242 auto res = parseImm(Operands);
1243 if (res != MatchOperand_NoMatch) {
1244 return res;
1245 }
1246
1247 if (auto R = parseRegister()) {
1248 assert(R->isReg());
1249 R->Reg.IsForcedVOP3 = isForcedVOP3();
1250 Operands.push_back(std::move(R));
1251 return MatchOperand_Success;
1252 }
1253 return MatchOperand_ParseFail;
1254}
1255
// Parse a register/immediate operand that may carry FP input modifiers:
// '-' (negate), '|x|' (abs, pipe syntax), or 'abs(x)' (function syntax).
// The pipe and function spellings of abs are mutually exclusive.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // 'abs(' ... ')' function-style absolute value.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // '|' ... '|' pipe-style absolute value; cannot be nested inside abs().
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Negate) {
    Mods.Neg = true;
  }
  // Consume the closing delimiter of whichever abs syntax was opened.
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  // Attach the modifiers to the operand just parsed, if any were present.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1319
// Parse a register/immediate operand that may carry the integer input
// modifier 'sext(x)' (sign-extension).
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  bool Sext = false;

  // 'sext(' ... ')' function-style sign-extension modifier.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Sext) {
    // Require the closing paren of sext(...).
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  // Attach the modifier to the operand just parsed, if present.
  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001355
// Target-specific post-match validation: reject encodings that contradict
// a forced suffix (_e32/_e64/DPP/SDWA), prefer e32 where mandated, and
// enforce the v_mac SDWA dst_sel restriction.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // A forced encoding must agree with the matched instruction's flags.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Some VOP3 forms must be emitted as e32 unless _e64 was forced.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}
1384
// Match the parsed operands against all applicable asm variants (chosen
// from any forced suffix), emit the instruction on success, and otherwise
// report the most specific failure seen across variants.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  // What asm variants we should check
  std::vector<unsigned> MatchedVariants;
  if (getForcedEncodingSize() == 32) {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
  } else if (isForcedVOP3()) {
    MatchedVariants = {AMDGPUAsmVariants::VOP3};
  } else if (isForcedSDWA()) {
    MatchedVariants = {AMDGPUAsmVariants::SDWA};
  } else if (isForcedDPP()) {
    MatchedVariants = {AMDGPUAsmVariants::DPP};
  } else {
    // No forced suffix: try every variant in order.
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
                       AMDGPUAsmVariants::VOP3,
                       AMDGPUAsmVariants::SDWA,
                       AMDGPUAsmVariants::DPP};
  }

  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : MatchedVariants) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when we know which.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1463
Tom Stellard347ac792015-06-26 21:15:07 +00001464bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1465 uint32_t &Minor) {
1466 if (getLexer().isNot(AsmToken::Integer))
1467 return TokError("invalid major version");
1468
1469 Major = getLexer().getTok().getIntVal();
1470 Lex();
1471
1472 if (getLexer().isNot(AsmToken::Comma))
1473 return TokError("minor version number required, comma expected");
1474 Lex();
1475
1476 if (getLexer().isNot(AsmToken::Integer))
1477 return TokError("invalid minor version");
1478
1479 Minor = getLexer().getTok().getIntVal();
1480 Lex();
1481
1482 return false;
1483}
1484
1485bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1486
1487 uint32_t Major;
1488 uint32_t Minor;
1489
1490 if (ParseDirectiveMajorMinor(Major, Minor))
1491 return true;
1492
1493 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1494 return false;
1495}
1496
// Handle ".hsa_code_object_isa" with either no arguments (use the targeted
// GPU's ISA version) or the full five-argument form:
//   <major>, <minor>, <stepping>, "<vendor>", "<arch>"
// Returns true on parse failure (a TokError has been emitted).
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  // Third numeric argument: the stepping revision.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  // Fourth argument: quoted vendor name.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  // Fifth argument: quoted architecture name.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1553
Tom Stellardff7416b2015-06-26 21:58:31 +00001554bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1555 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001556 SmallString<40> ErrStr;
1557 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00001558 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001559 return TokError(Err.str());
1560 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001561 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001562 return false;
1563}
1564
// Handle ".amd_kernel_code_t": start from the subtarget's default header,
// then consume "<field> = <value>" lines until ".end_amd_kernel_code_t",
// and emit the finished amd_kernel_code_t via the target streamer.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    // Terminator for the directive body.
    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1594
Tom Stellarde135ffd2015-09-25 21:41:28 +00001595bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1596 getParser().getStreamer().SwitchSection(
1597 AMDGPU::getHSATextSection(getContext()));
1598 return false;
1599}
1600
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001601bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1602 if (getLexer().isNot(AsmToken::Identifier))
1603 return TokError("expected symbol name");
1604
1605 StringRef KernelName = Parser.getTok().getString();
1606
1607 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1608 ELF::STT_AMDGPU_HSA_KERNEL);
1609 Lex();
1610 return false;
1611}
1612
Tom Stellard00f2f912015-12-02 19:47:57 +00001613bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1614 if (getLexer().isNot(AsmToken::Identifier))
1615 return TokError("expected symbol name");
1616
1617 StringRef GlobalName = Parser.getTok().getIdentifier();
1618
1619 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1620 Lex();
1621 return false;
1622}
1623
1624bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1625 if (getLexer().isNot(AsmToken::Identifier))
1626 return TokError("expected symbol name");
1627
1628 StringRef GlobalName = Parser.getTok().getIdentifier();
1629
1630 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1631 Lex();
1632 return false;
1633}
1634
1635bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1636 getParser().getStreamer().SwitchSection(
1637 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1638 return false;
1639}
1640
1641bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1642 getParser().getStreamer().SwitchSection(
1643 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1644 return false;
1645}
1646
Tom Stellard9760f032015-12-03 03:34:32 +00001647bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1648 getParser().getStreamer().SwitchSection(
1649 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1650 return false;
1651}
1652
Tom Stellard45bb48e2015-06-13 03:28:10 +00001653bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001654 StringRef IDVal = DirectiveID.getString();
1655
1656 if (IDVal == ".hsa_code_object_version")
1657 return ParseDirectiveHSACodeObjectVersion();
1658
1659 if (IDVal == ".hsa_code_object_isa")
1660 return ParseDirectiveHSACodeObjectISA();
1661
Tom Stellardff7416b2015-06-26 21:58:31 +00001662 if (IDVal == ".amd_kernel_code_t")
1663 return ParseDirectiveAMDKernelCodeT();
1664
Tom Stellardfcfaea42016-05-05 17:03:33 +00001665 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001666 return ParseSectionDirectiveHSAText();
1667
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001668 if (IDVal == ".amdgpu_hsa_kernel")
1669 return ParseDirectiveAMDGPUHsaKernel();
1670
Tom Stellard00f2f912015-12-02 19:47:57 +00001671 if (IDVal == ".amdgpu_hsa_module_global")
1672 return ParseDirectiveAMDGPUHsaModuleGlobal();
1673
1674 if (IDVal == ".amdgpu_hsa_program_global")
1675 return ParseDirectiveAMDGPUHsaProgramGlobal();
1676
1677 if (IDVal == ".hsadata_global_agent")
1678 return ParseSectionDirectiveHSADataGlobalAgent();
1679
1680 if (IDVal == ".hsadata_global_program")
1681 return ParseSectionDirectiveHSADataGlobalProgram();
1682
Tom Stellard9760f032015-12-03 03:34:32 +00001683 if (IDVal == ".hsarodata_readonly_agent")
1684 return ParseSectionDirectiveHSARodataReadonlyAgent();
1685
Tom Stellard45bb48e2015-06-13 03:28:10 +00001686 return true;
1687}
1688
Matt Arsenault68802d32015-11-05 03:11:27 +00001689bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1690 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001691 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001692 return true;
1693
Matt Arsenault3b159672015-12-01 20:31:08 +00001694 if (isSI()) {
1695 // No flat_scr
1696 switch (RegNo) {
1697 case AMDGPU::FLAT_SCR:
1698 case AMDGPU::FLAT_SCR_LO:
1699 case AMDGPU::FLAT_SCR_HI:
1700 return false;
1701 default:
1702 return true;
1703 }
1704 }
1705
Matt Arsenault68802d32015-11-05 03:11:27 +00001706 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1707 // SI/CI have.
1708 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1709 R.isValid(); ++R) {
1710 if (*R == RegNo)
1711 return false;
1712 }
1713
1714 return true;
1715}
1716
// Parse one instruction operand.  Tries, in order: the table-generated custom
// operand parsers, a register-or-immediate, and finally a symbol expression
// (falling back to a bare token if the expression fails to parse).
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Generic path: register or immediate.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treate the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: keep the raw identifier as a token operand.
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1758
Sam Kolton05ef1c92016-06-03 10:27:37 +00001759StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1760 // Clear any forced encodings from the previous instruction.
1761 setForcedEncodingSize(0);
1762 setForcedDPP(false);
1763 setForcedSDWA(false);
1764
1765 if (Name.endswith("_e64")) {
1766 setForcedEncodingSize(64);
1767 return Name.substr(0, Name.size() - 4);
1768 } else if (Name.endswith("_e32")) {
1769 setForcedEncodingSize(32);
1770 return Name.substr(0, Name.size() - 4);
1771 } else if (Name.endswith("_dpp")) {
1772 setForcedDPP(true);
1773 return Name.substr(0, Name.size() - 4);
1774 } else if (Name.endswith("_sdwa")) {
1775 setForcedSDWA(true);
1776 return Name.substr(0, Name.size() - 5);
1777 }
1778 return Name;
1779}
1780
Tom Stellard45bb48e2015-06-13 03:28:10 +00001781bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1782 StringRef Name,
1783 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001784 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00001785 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001786 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00001787
Tom Stellard45bb48e2015-06-13 03:28:10 +00001788 while (!getLexer().is(AsmToken::EndOfStatement)) {
Alex Bradbury58eba092016-11-01 16:32:05 +00001789 OperandMatchResultTy Res = parseOperand(Operands, Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001790
1791 // Eat the comma or space if there is one.
1792 if (getLexer().is(AsmToken::Comma))
1793 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00001794
Tom Stellard45bb48e2015-06-13 03:28:10 +00001795 switch (Res) {
1796 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001797 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001798 Error(getLexer().getLoc(), "failed parsing operand.");
1799 while (!getLexer().is(AsmToken::EndOfStatement)) {
1800 Parser.Lex();
1801 }
1802 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001803 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001804 Error(getLexer().getLoc(), "not a valid operand.");
1805 while (!getLexer().is(AsmToken::EndOfStatement)) {
1806 Parser.Lex();
1807 }
1808 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001809 }
1810 }
1811
Tom Stellard45bb48e2015-06-13 03:28:10 +00001812 return false;
1813}
1814
1815//===----------------------------------------------------------------------===//
1816// Utility functions
1817//===----------------------------------------------------------------------===//
1818
// Parse "<Prefix>:<integer>" and store the integer in Int.
// Returns NoMatch if the current token is not the expected prefix, and
// ParseFail if the prefix is present but the rest is malformed.
OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef Name = Parser.getTok().getString();
    if (!Name.equals(Prefix)) {
      return MatchOperand_NoMatch;
    }

    // Prefix matched: from here on, any malformed input is a hard failure.
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}
1844
Alex Bradbury58eba092016-11-01 16:32:05 +00001845OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00001846AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001847 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001848 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001849
1850 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001851 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001852
Alex Bradbury58eba092016-11-01 16:32:05 +00001853 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001854 if (Res != MatchOperand_Success)
1855 return Res;
1856
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001857 if (ConvertResult && !ConvertResult(Value)) {
1858 return MatchOperand_ParseFail;
1859 }
1860
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001861 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001862 return MatchOperand_Success;
1863}
1864
Alex Bradbury58eba092016-11-01 16:32:05 +00001865OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00001866AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001867 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001868 int64_t Bit = 0;
1869 SMLoc S = Parser.getTok().getLoc();
1870
1871 // We are at the end of the statement, and this is a default argument, so
1872 // use a default value.
1873 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1874 switch(getLexer().getKind()) {
1875 case AsmToken::Identifier: {
1876 StringRef Tok = Parser.getTok().getString();
1877 if (Tok == Name) {
1878 Bit = 1;
1879 Parser.Lex();
1880 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1881 Bit = 0;
1882 Parser.Lex();
1883 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001884 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001885 }
1886 break;
1887 }
1888 default:
1889 return MatchOperand_NoMatch;
1890 }
1891 }
1892
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001893 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001894 return MatchOperand_Success;
1895}
1896
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001897typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1898
Sam Koltona74cd522016-03-18 15:35:51 +00001899void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1900 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001901 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001902 auto i = OptionalIdx.find(ImmT);
1903 if (i != OptionalIdx.end()) {
1904 unsigned Idx = i->second;
1905 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1906 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001907 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001908 }
1909}
1910
Alex Bradbury58eba092016-11-01 16:32:05 +00001911OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00001912AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00001913 if (getLexer().isNot(AsmToken::Identifier)) {
1914 return MatchOperand_NoMatch;
1915 }
1916 StringRef Tok = Parser.getTok().getString();
1917 if (Tok != Prefix) {
1918 return MatchOperand_NoMatch;
1919 }
1920
1921 Parser.Lex();
1922 if (getLexer().isNot(AsmToken::Colon)) {
1923 return MatchOperand_ParseFail;
1924 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001925
Sam Kolton3025e7f2016-04-26 13:33:56 +00001926 Parser.Lex();
1927 if (getLexer().isNot(AsmToken::Identifier)) {
1928 return MatchOperand_ParseFail;
1929 }
1930
1931 Value = Parser.getTok().getString();
1932 return MatchOperand_Success;
1933}
1934
Tom Stellard45bb48e2015-06-13 03:28:10 +00001935//===----------------------------------------------------------------------===//
1936// ds
1937//===----------------------------------------------------------------------===//
1938
Tom Stellard45bb48e2015-06-13 03:28:10 +00001939void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1940 const OperandVector &Operands) {
1941
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001942 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001943
1944 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1945 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1946
1947 // Add the register arguments
1948 if (Op.isReg()) {
1949 Op.addRegOperands(Inst, 1);
1950 continue;
1951 }
1952
1953 // Handle optional arguments
1954 OptionalIdx[Op.getImmTy()] = i;
1955 }
1956
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001957 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1958 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001959 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001960
Tom Stellard45bb48e2015-06-13 03:28:10 +00001961 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1962}
1963
1964void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1965
1966 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1967 bool GDSOnly = false;
1968
1969 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1970 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1971
1972 // Add the register arguments
1973 if (Op.isReg()) {
1974 Op.addRegOperands(Inst, 1);
1975 continue;
1976 }
1977
1978 if (Op.isToken() && Op.getToken() == "gds") {
1979 GDSOnly = true;
1980 continue;
1981 }
1982
1983 // Handle optional arguments
1984 OptionalIdx[Op.getImmTy()] = i;
1985 }
1986
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001987 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1988 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001989
1990 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001991 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001992 }
1993 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1994}
1995
1996
1997//===----------------------------------------------------------------------===//
1998// s_waitcnt
1999//===----------------------------------------------------------------------===//
2000
// Parse one "<name>(<value>)" component of an s_waitcnt operand, where
// <name> is vmcnt, expcnt or lgkmcnt, and fold the count into IntVal
// (an in/out packed waitcnt bitfield).  Returns true on any parse error.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  // Components may be separated by '&' or ','; both are optional.
  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  // Encode the counter into the subtarget-specific waitcnt field layout.
  IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
  if (CntName == "vmcnt")
    IntVal = encodeVmcnt(IV, IntVal, CntVal);
  else if (CntName == "expcnt")
    IntVal = encodeExpcnt(IV, IntVal, CntVal);
  else if (CntName == "lgkmcnt")
    IntVal = encodeLgkmcnt(IV, IntVal, CntVal);
  else
    return true; // Unknown counter name.

  return false;
}
2035
// Parse an s_waitcnt operand: either a raw integer, or a sequence of
// named counters like "vmcnt(0) lgkmcnt(1)".  Unspecified counters keep
// their all-ones (no-wait) value from getWaitcntBitMask.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
  int64_t Waitcnt = getWaitcntBitMask(IV);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // Named-counter form: fold each "<name>(<value>)" into Waitcnt.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
2060
// Parse the "hwreg(<id>[, <offset>, <width>])" construct.  The register id
// may be symbolic or numeric; Offset/Width keep their caller-supplied
// defaults when the short form is used.  Returns true on parse failure;
// range validation of the values is left to the caller (parseHwreg).
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: look it up; unknown names leave ID_UNKNOWN_
    // for the caller to diagnose.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form "hwreg(id)": offset/width keep their defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2121
// Parse an s_getreg/s_setreg operand: either a raw 16-bit immediate or a
// "hwreg(...)" construct, which is validated and packed into the
// id/offset/width-1 bitfields of the simm16 encoding.
OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        OperandInfoTy HwReg(ID_UNKNOWN_);
        int64_t Offset = OFFSET_DEFAULT_;
        int64_t Width = WIDTH_M1_DEFAULT_ + 1;
        if (parseHwregConstruct(HwReg, Offset, Width))
          return MatchOperand_ParseFail;
        // Range-check each field; like above, errors are reported but an
        // immediate operand is still produced.
        if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
          if (HwReg.IsSymbolic)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        // Pack id/offset/(width-1) into the simm16 layout.
        Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
2165
// An s_waitcnt operand is any immediate (the packed counter bitfield).
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
2169
// A hwreg operand is an immediate tagged ImmTyHwreg by parseHwreg.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
2173
// Parse the "sendmsg(<msg>[, <op>[, <stream>]])" construct.  Message and
// operation may each be symbolic or numeric; the stream id is optional and
// only meaningful for GS messages.  Returns true on parse failure; value
// validation/diagnostics are done by the caller (parseSendMsgOp).
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name: scan the table, skipping unassigned ids.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric message id; a trailing extra integer invalidates it.
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Only GS/GS_DONE/SYSMSG messages take an operation argument.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic operation name: pick the GS or SYSMSG table by message id.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
2269
/// Parse the operand of s_sendmsg / s_sendmsgh into its 16-bit SIMM16
/// encoding. Accepts either a raw integer (validated to fit 16 bits) or a
/// symbolic sendmsg(...) construct, which is decomposed by
/// parseSendMsgConstruct() and re-encoded here from the message ID,
/// operation ID, and stream ID fields.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      // do { } while (0) is used purely so 'break' can abort validation after
      // the first diagnostic while still falling through to operand creation.
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          // NOP is only meaningful when the message itself signals GS_DONE.
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID (only GS messages other than NOP
        // carry one).
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (0);
    }
    break;
  }
  // Even after a diagnostic, emit the (partially encoded) operand so that
  // parsing of the remaining operands can continue.
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2348
// True if this operand is an immediate tagged as a sendmsg SIMM16 encoding
// (produced by parseSendMsgOp above).
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
2352
Tom Stellard45bb48e2015-06-13 03:28:10 +00002353//===----------------------------------------------------------------------===//
2354// sopp branch targets
2355//===----------------------------------------------------------------------===//
2356
Alex Bradbury58eba092016-11-01 16:32:05 +00002357OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002358AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2359 SMLoc S = Parser.getTok().getLoc();
2360
2361 switch (getLexer().getKind()) {
2362 default: return MatchOperand_ParseFail;
2363 case AsmToken::Integer: {
2364 int64_t Imm;
2365 if (getParser().parseAbsoluteExpression(Imm))
2366 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002367 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002368 return MatchOperand_Success;
2369 }
2370
2371 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002372 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00002373 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2374 Parser.getTok().getString()), getContext()), S));
2375 Parser.Lex();
2376 return MatchOperand_Success;
2377 }
2378}
2379
2380//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002381// mubuf
2382//===----------------------------------------------------------------------===//
2383
// Default operand for an omitted glc modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
2387
// Default operand for an omitted slc modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
2391
// Default operand for an omitted tfe modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
2395
/// Convert parsed MUBUF operands into an MCInst: registers and the plain
/// soffset immediate go in parse order; named optional immediates (offset,
/// glc, slc, tfe) are collected and appended in the fixed encoding order.
/// For atomic-with-return forms, $vdata_in is duplicated to also serve as
/// $vdata.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                               const OperandVector &Operands,
                               bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  // IsAtomicReturn implies IsAtomic.
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  // Append optional immediates in the order the encoding expects them.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2441
2442//===----------------------------------------------------------------------===//
2443// mimg
2444//===----------------------------------------------------------------------===//
2445
/// Convert parsed MIMG operands into an MCInst: defs first, then source
/// registers/immediates in parse order, then the named optional modifiers
/// appended in the fixed encoding order expected by the instruction.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Operands[0] is the mnemonic token; defs come immediately after it.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Named modifier; remember its position for the ordered append below.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2478
/// Convert parsed MIMG atomic operands into an MCInst. Same layout as
/// cvtMIMG, except the destination register is additionally re-added as the
/// first source (atomics read and write the same vdata register).
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Named modifier; remember its position for the ordered append below.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2514
// Default operand for an omitted dmask modifier: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}
2518
// Default operand for an omitted unorm modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}
2522
// Default operand for an omitted da modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
}
2526
// Default operand for an omitted r128 modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
}
2530
// Default operand for an omitted lwe modifier: immediate 0 (flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
2534
Tom Stellard45bb48e2015-06-13 03:28:10 +00002535//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002536// smrd
2537//===----------------------------------------------------------------------===//
2538
// True if this immediate fits an 8-bit SMRD offset field.
// NOTE(review): presumably the pre-VI 8-bit dword-offset encoding — confirm
// against the SMRD instruction definitions.
bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}
2542
// True if this immediate fits a 20-bit SMRD/SMEM offset field.
// NOTE(review): presumably the VI+ 20-bit byte-offset encoding — confirm
// against the SMEM instruction definitions.
bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}
2546
// True if this immediate needs the 32-bit literal SMRD offset form:
// it does not fit 8 bits but does fit 32.
bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
2552
// Default operand for an omitted 8-bit SMRD offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2556
// Default operand for an omitted 20-bit SMRD/SMEM offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2560
// Default operand for an omitted SMRD literal offset: immediate 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2564
Tom Stellard217361c2015-08-06 19:28:38 +00002565//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002566// vop3
2567//===----------------------------------------------------------------------===//
2568
// Convert an omod "mul:N" factor (1, 2, or 4) into its hardware encoding
// (0, 1, or 2 — i.e. N >> 1). Returns false for any other factor, leaving
// the value untouched.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
2576
// Convert an omod "div:N" divisor into its hardware encoding:
// div:1 -> 0, div:2 -> 3. Returns false for any other divisor, leaving the
// value untouched.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2590
// Convert a parsed bound_ctrl value into its DPP field encoding:
// bound_ctrl:0 -> 1 (clamp to zero enabled), bound_ctrl:-1 -> 0.
// Returns false for any other value, leaving it untouched.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2604
// Note: the order in this table matches the order of operands in AsmString.
// Each row is {Name, Type, IsBit, ConvertResult}:
//   Name          - modifier spelling as written in assembly (e.g. "glc").
//   Type          - the AMDGPUOperand immediate type it produces.
//   IsBit         - true for bare flag modifiers with no ":value" payload.
//   ConvertResult - optional hook rewriting the parsed integer into its
//                   hardware encoding (omod mul/div, bound_ctrl).
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA,    true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128,  true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE,   true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002632
Alex Bradbury58eba092016-11-01 16:32:05 +00002633OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
Sam Kolton11de3702016-05-24 12:38:33 +00002634 OperandMatchResultTy res;
2635 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2636 // try to parse any optional operand here
2637 if (Op.IsBit) {
2638 res = parseNamedBit(Op.Name, Operands, Op.Type);
2639 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2640 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002641 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
2642 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
2643 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
2644 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00002645 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2646 res = parseSDWADstUnused(Operands);
2647 } else {
2648 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2649 }
2650 if (res != MatchOperand_NoMatch) {
2651 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002652 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002653 }
2654 return MatchOperand_NoMatch;
2655}
2656
Matt Arsenault12c53892016-11-15 19:58:54 +00002657OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002658 StringRef Name = Parser.getTok().getString();
2659 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00002660 return parseIntWithPrefix("mul", Operands,
2661 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002662 }
Matt Arsenault12c53892016-11-15 19:58:54 +00002663
2664 if (Name == "div") {
2665 return parseIntWithPrefix("div", Operands,
2666 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
2667 }
2668
2669 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002670}
2671
Tom Stellarda90b9522016-02-11 03:28:15 +00002672void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2673 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002674 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002675 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002676 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2677 }
2678 for (unsigned E = Operands.size(); I != E; ++I)
2679 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2680}
2681
2682void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002683 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2684 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002685 cvtVOP3(Inst, Operands);
2686 } else {
2687 cvtId(Inst, Operands);
2688 }
2689}
2690
Sam Koltona3ec5c12016-10-07 14:46:06 +00002691static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
2692 // 1. This operand is input modifiers
2693 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
2694 // 2. This is not last operand
2695 && Desc.NumOperands > (OpNum + 1)
2696 // 3. Next operand is register class
2697 && Desc.OpInfo[OpNum + 1].RegClass != -1
2698 // 4. Next register is not tied to any other operand
2699 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
2700}
2701
/// Convert parsed VOP3 operands into an MCInst: defs, then sources (with
/// FP input modifiers where the descriptor calls for them), then the
/// optional clamp/omod immediates. v_mac is special-cased because its src2
/// is tied to dst and takes no modifiers.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Operands[0] is the mnemonic token; defs come immediately after it.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Note: the modifier check is driven by the *next MCInst slot*
    // (Inst.getNumOperands()), not by the parsed operand's own index.
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    // Look up src2_modifiers' position in the target-independent pseudo,
    // then splice {src2_modifiers = 0, src2 = dst} in at that point.
    std::advance(
      it,
      AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
                                     AMDGPU::V_MAC_F16_e64 :
                                     AMDGPU::V_MAC_F32_e64,
                                 AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
2743
Sam Koltondfa29f72016-03-09 12:29:31 +00002744//===----------------------------------------------------------------------===//
2745// dpp
2746//===----------------------------------------------------------------------===//
2747
2748bool AMDGPUOperand::isDPPCtrl() const {
2749 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2750 if (result) {
2751 int64_t Imm = getImm();
2752 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2753 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2754 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2755 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2756 (Imm == 0x130) ||
2757 (Imm == 0x134) ||
2758 (Imm == 0x138) ||
2759 (Imm == 0x13c) ||
2760 (Imm == 0x140) ||
2761 (Imm == 0x141) ||
2762 (Imm == 0x142) ||
2763 (Imm == 0x143);
2764 }
2765 return false;
2766}
2767
// True if this immediate fits the 4-bit gpr_idx mode operand.
// NOTE(review): presumably the s_set_gpr_idx mode bit-mask — confirm against
// the VGPR-indexing instruction definitions.
bool AMDGPUOperand::isGPRIdxMode() const {
  return isImm() && isUInt<4>(getImm());
}
2771
/// Parse a dpp_ctrl specifier into its 9-bit encoding. Accepted forms:
///   quad_perm:[a,b,c,d]       -> 0x000-0x0ff (2 bits per lane select)
///   row_shl/row_shr/row_ror:N -> 0x100/0x110/0x120 | N  (1 <= N <= 15)
///   wave_shl/rol/shr/ror:1    -> 0x130/0x134/0x138/0x13c
///   row_mirror/row_half_mirror-> 0x140/0x141
///   row_bcast:15 / 31         -> 0x142 / 0x143
/// Returns NoMatch (without consuming tokens) for unrelated identifiers so
/// other operand parsers can have a go; ParseFail after the prefix has been
/// committed.
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane select seeds bits [1:0]; each selector must be 0..3.
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        // Pack the remaining selectors into bits [3:2], [5:4], [7:6].
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      // Combine the prefix's base encoding with the validated argument.
      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2871
// Default operand for an omitted row_mask: 0xf (all rows enabled).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
}
2875
// Default operand for an omitted bank_mask: 0xf (all banks enabled).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}
2879
// Default operand for an omitted bound_ctrl: 0 (encoded flag clear).
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}
2883
/// Convert parsed DPP operands into an MCInst: defs, sources (with FP input
/// modifiers where required), the dpp_ctrl immediate, then the optional
/// row_mask/bank_mask/bound_ctrl immediates with their defaults. v_mac is
/// special-cased because its src2 is tied to dst.
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Operands[0] is the mnemonic token; defs come immediately after it.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // Omitted masks default to 0xf (all rows/banks enabled).
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002922
Sam Kolton3025e7f2016-04-26 13:33:56 +00002923//===----------------------------------------------------------------------===//
2924// sdwa
2925//===----------------------------------------------------------------------===//
2926
Alex Bradbury58eba092016-11-01 16:32:05 +00002927OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002928AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
2929 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002930 using namespace llvm::AMDGPU::SDWA;
2931
Sam Kolton3025e7f2016-04-26 13:33:56 +00002932 SMLoc S = Parser.getTok().getLoc();
2933 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00002934 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00002935
Sam Kolton05ef1c92016-06-03 10:27:37 +00002936 res = parseStringWithPrefix(Prefix, Value);
2937 if (res != MatchOperand_Success) {
2938 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00002939 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002940
Sam Kolton3025e7f2016-04-26 13:33:56 +00002941 int64_t Int;
2942 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00002943 .Case("BYTE_0", SdwaSel::BYTE_0)
2944 .Case("BYTE_1", SdwaSel::BYTE_1)
2945 .Case("BYTE_2", SdwaSel::BYTE_2)
2946 .Case("BYTE_3", SdwaSel::BYTE_3)
2947 .Case("WORD_0", SdwaSel::WORD_0)
2948 .Case("WORD_1", SdwaSel::WORD_1)
2949 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00002950 .Default(0xffffffff);
2951 Parser.Lex(); // eat last token
2952
2953 if (Int == 0xffffffff) {
2954 return MatchOperand_ParseFail;
2955 }
2956
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002957 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00002958 return MatchOperand_Success;
2959}
2960
Alex Bradbury58eba092016-11-01 16:32:05 +00002961OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00002962AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002963 using namespace llvm::AMDGPU::SDWA;
2964
Sam Kolton3025e7f2016-04-26 13:33:56 +00002965 SMLoc S = Parser.getTok().getLoc();
2966 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00002967 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00002968
2969 res = parseStringWithPrefix("dst_unused", Value);
2970 if (res != MatchOperand_Success) {
2971 return res;
2972 }
2973
2974 int64_t Int;
2975 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00002976 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
2977 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
2978 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00002979 .Default(0xffffffff);
2980 Parser.Lex(); // eat last token
2981
2982 if (Int == 0xffffffff) {
2983 return MatchOperand_ParseFail;
2984 }
2985
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002986 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00002987 return MatchOperand_Success;
2988}
2989
/// Operand-conversion entry point for VOP1-encoded SDWA instructions;
/// forwards to the shared cvtSDWA with the VOP1 encoding flag.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
2993
/// Operand-conversion entry point for VOP2-encoded SDWA instructions;
/// forwards to the shared cvtSDWA with the VOP2 encoding flag.
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
2997
/// Operand-conversion entry point for VOPC-encoded SDWA instructions;
/// forwards to the shared cvtSDWA with the VOPC encoding flag.
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
}
3001
/// Build the MCInst for an SDWA instruction from the parsed operand list.
/// \p BasicInstType selects the underlying encoding class (SIInstrFlags::
/// VOP1, VOP2 or VOPC), which determines which optional sel/unused operands
/// are appended after the explicit ones.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  // Records, per optional-immediate type, where it appeared in Operands.
  OptionalImmIndexMap OptionalIdx;

  // Operands[0] is the mnemonic token; the instruction's defs come next.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (BasicInstType == SIInstrFlags::VOPC &&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC sdwa use "vcc" token as dst. Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // A source: emitted as two MC operands (input modifiers + reg/imm).
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // clamp defaults to 0 when not written explicitly.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa) {
    // V_NOP_sdwa has no optional sdwa arguments
    // Append the dst_sel/dst_unused/src*_sel operands the encoding expects.
    // The numeric defaults (6 and 2) presumably correspond to the DWORD
    // selector and UNUSED_PRESERVE — confirm against the SDWA enums in
    // SIDefines.h.
    switch (BasicInstType) {
    case SIInstrFlags::VOP1: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      break;
    }
    case SIInstrFlags::VOP2: {
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;
    }
    case SIInstrFlags::VOPC: {
      // VOPC writes vcc (skipped above), so there is no dst_sel/dst_unused.
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;
    }
    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }

}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003069
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  // Register this asm parser with both AMDGPU target variants.
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
3075
3076#define GET_REGISTER_MATCHER
3077#define GET_MATCHER_IMPLEMENTATION
3078#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00003079
3080
3081// This fuction should be defined after auto-generated include so that we have
3082// MatchClassKind enum defined
3083unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
3084 unsigned Kind) {
3085 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00003086 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00003087 // operand. This method checks if we are given immediate operand but expect to
3088 // get corresponding token.
3089 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
3090 switch (Kind) {
3091 case MCK_addr64:
3092 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
3093 case MCK_gds:
3094 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
3095 case MCK_glc:
3096 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
3097 case MCK_idxen:
3098 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
3099 case MCK_offen:
3100 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003101 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00003102 // When operands have expression values, they will return true for isToken,
3103 // because it is not possible to distinguish between a token and an
3104 // expression at parse time. MatchInstructionImpl() will always try to
3105 // match an operand as a token, when isToken returns true, and when the
3106 // name of the expression is not a valid token, the match will fail,
3107 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003108 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
3109 case MCK_SSrcF32:
3110 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00003111 case MCK_SoppBrTarget:
3112 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00003113 default: return Match_InvalidOperand;
3114 }
3115}