blob: 5a9695f0d570dbe42c285b34eb293dc8ca9f14bd [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000018#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000019#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000021#include "llvm/ADT/StringSwitch.h"
22#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000023#include "llvm/CodeGen/MachineValueType.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/MC/MCContext.h"
25#include "llvm/MC/MCExpr.h"
26#include "llvm/MC/MCInst.h"
27#include "llvm/MC/MCInstrInfo.h"
28#include "llvm/MC/MCParser/MCAsmLexer.h"
29#include "llvm/MC/MCParser/MCAsmParser.h"
30#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000031#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000032#include "llvm/MC/MCRegisterInfo.h"
33#include "llvm/MC/MCStreamer.h"
34#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000036#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000037#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/Support/SourceMgr.h"
39#include "llvm/Support/TargetRegistry.h"
40#include "llvm/Support/raw_ostream.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000041#include "llvm/Support/MathExtras.h"
Artem Tamazovebe71ce2016-05-06 17:48:48 +000042
Tom Stellard45bb48e2015-06-13 03:28:10 +000043using namespace llvm;
44
45namespace {
46
class AMDGPUAsmParser;
struct OptionalOperand;

// Kind of register a parsed operand name resolved to: vector GPR, scalar GPR,
// trap-temporary, or one of the named special registers; IS_UNKNOWN until
// classification succeeds.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
51
Sam Kolton1eeb11b2016-09-09 14:44:04 +000052//===----------------------------------------------------------------------===//
53// Operand
54//===----------------------------------------------------------------------===//
55
Tom Stellard45bb48e2015-06-13 03:28:10 +000056class AMDGPUOperand : public MCParsedAsmOperand {
57 enum KindTy {
58 Token,
59 Immediate,
60 Register,
61 Expression
62 } Kind;
63
64 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000065 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000066
67public:
Sam Kolton1eeb11b2016-09-09 14:44:04 +000068 AMDGPUOperand(enum KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
69 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000070
Sam Kolton5f10a132016-05-06 11:31:17 +000071 typedef std::unique_ptr<AMDGPUOperand> Ptr;
72
Sam Kolton945231a2016-06-10 09:57:59 +000073 struct Modifiers {
74 bool Abs;
75 bool Neg;
76 bool Sext;
77
78 bool hasFPModifiers() const { return Abs || Neg; }
79 bool hasIntModifiers() const { return Sext; }
80 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
81
82 int64_t getFPModifiersOperand() const {
83 int64_t Operand = 0;
84 Operand |= Abs ? SISrcMods::ABS : 0;
85 Operand |= Neg ? SISrcMods::NEG : 0;
86 return Operand;
87 }
88
89 int64_t getIntModifiersOperand() const {
90 int64_t Operand = 0;
91 Operand |= Sext ? SISrcMods::SEXT : 0;
92 return Operand;
93 }
94
95 int64_t getModifiersOperand() const {
96 assert(!(hasFPModifiers() && hasIntModifiers())
97 && "fp and int modifiers should not be used simultaneously");
98 if (hasFPModifiers()) {
99 return getFPModifiersOperand();
100 } else if (hasIntModifiers()) {
101 return getIntModifiersOperand();
102 } else {
103 return 0;
104 }
105 }
106
107 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
108 };
109
Tom Stellard45bb48e2015-06-13 03:28:10 +0000110 enum ImmTy {
111 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000112 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000113 ImmTyOffen,
114 ImmTyIdxen,
115 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000116 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000117 ImmTyOffset0,
118 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000119 ImmTyGLC,
120 ImmTySLC,
121 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000122 ImmTyClampSI,
123 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000124 ImmTyDppCtrl,
125 ImmTyDppRowMask,
126 ImmTyDppBankMask,
127 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000128 ImmTySdwaDstSel,
129 ImmTySdwaSrc0Sel,
130 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000131 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000132 ImmTyDMask,
133 ImmTyUNorm,
134 ImmTyDA,
135 ImmTyR128,
136 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +0000137 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000138 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000139 };
140
141 struct TokOp {
142 const char *Data;
143 unsigned Length;
144 };
145
146 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000147 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000148 ImmTy Type;
149 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000150 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000151 };
152
153 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000154 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000155 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000156 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000157 };
158
159 union {
160 TokOp Tok;
161 ImmOp Imm;
162 RegOp Reg;
163 const MCExpr *Expr;
164 };
165
Tom Stellard45bb48e2015-06-13 03:28:10 +0000166 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000167 if (Kind == Token)
168 return true;
169
170 if (Kind != Expression || !Expr)
171 return false;
172
173 // When parsing operands, we can't always tell if something was meant to be
174 // a token, like 'gds', or an expression that references a global variable.
175 // In this case, we assume the string is an expression, and if we need to
176 // interpret is a token, then we treat the symbol name as the token.
177 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000178 }
179
180 bool isImm() const override {
181 return Kind == Immediate;
182 }
183
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000184 bool isInlinableImm(MVT type) const;
185 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000186
Tom Stellard45bb48e2015-06-13 03:28:10 +0000187 bool isRegKind() const {
188 return Kind == Register;
189 }
190
191 bool isReg() const override {
Sam Kolton945231a2016-06-10 09:57:59 +0000192 return isRegKind() && !Reg.Mods.hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000193 }
194
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000195 bool isRegOrImmWithInputMods(MVT type) const {
196 return isRegKind() || isInlinableImm(type);
197 }
198
199 bool isRegOrImmWithInt32InputMods() const {
200 return isRegOrImmWithInputMods(MVT::i32);
201 }
202
203 bool isRegOrImmWithInt64InputMods() const {
204 return isRegOrImmWithInputMods(MVT::i64);
205 }
206
207 bool isRegOrImmWithFP32InputMods() const {
208 return isRegOrImmWithInputMods(MVT::f32);
209 }
210
211 bool isRegOrImmWithFP64InputMods() const {
212 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000213 }
214
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000215 bool isImmTy(ImmTy ImmT) const {
216 return isImm() && Imm.Type == ImmT;
217 }
Sam Kolton945231a2016-06-10 09:57:59 +0000218
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000219 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000220 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000221 }
Sam Kolton945231a2016-06-10 09:57:59 +0000222
223 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
224 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
225 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000226 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
227 bool isDA() const { return isImmTy(ImmTyDA); }
228 bool isR128() const { return isImmTy(ImmTyUNorm); }
229 bool isLWE() const { return isImmTy(ImmTyLWE); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000230 bool isOffen() const { return isImmTy(ImmTyOffen); }
231 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
232 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
233 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
234 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
235 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000236 bool isGDS() const { return isImmTy(ImmTyGDS); }
237 bool isGLC() const { return isImmTy(ImmTyGLC); }
238 bool isSLC() const { return isImmTy(ImmTySLC); }
239 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000240 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
241 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
242 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
243 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
244 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
245 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
246 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
247
248 bool isMod() const {
249 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000250 }
251
252 bool isRegOrImm() const {
253 return isReg() || isImm();
254 }
255
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000256 bool isRegClass(unsigned RCID) const;
257
258 bool isSCSrcB32() const {
259 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000260 }
261
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000262 bool isSCSrcB64() const {
263 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000264 }
265
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000266 bool isSCSrcF32() const {
267 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000268 }
269
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000270 bool isSCSrcF64() const {
271 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000272 }
273
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000274 bool isSSrcB32() const {
275 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
276 }
277
278 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000279 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
280 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000281 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000282 }
283
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000284 bool isSSrcF32() const {
285 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000286 }
287
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000288 bool isSSrcF64() const {
289 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000290 }
291
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000292 bool isVCSrcB32() const {
293 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000294 }
295
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000296 bool isVCSrcB64() const {
297 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
298 }
299
300 bool isVCSrcF32() const {
301 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
302 }
303
304 bool isVCSrcF64() const {
305 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
306 }
307
308 bool isVSrcB32() const {
309 return isVCSrcF32() || isLiteralImm(MVT::i32);
310 }
311
312 bool isVSrcB64() const {
313 return isVCSrcF64() || isLiteralImm(MVT::i64);
314 }
315
316 bool isVSrcF32() const {
317 return isVCSrcF32() || isLiteralImm(MVT::f32);
318 }
319
320 bool isVSrcF64() const {
321 return isVCSrcF64() || isLiteralImm(MVT::f64);
322 }
323
324 bool isKImmFP32() const {
325 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000326 }
327
328 bool isMem() const override {
329 return false;
330 }
331
332 bool isExpr() const {
333 return Kind == Expression;
334 }
335
336 bool isSoppBrTarget() const {
337 return isExpr() || isImm();
338 }
339
Sam Kolton945231a2016-06-10 09:57:59 +0000340 bool isSWaitCnt() const;
341 bool isHwreg() const;
342 bool isSendMsg() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000343 bool isSMRDOffset() const;
344 bool isSMRDLiteralOffset() const;
345 bool isDPPCtrl() const;
346
Tom Stellard89049702016-06-15 02:54:14 +0000347 StringRef getExpressionAsToken() const {
348 assert(isExpr());
349 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
350 return S->getSymbol().getName();
351 }
352
353
Sam Kolton945231a2016-06-10 09:57:59 +0000354 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000355 assert(isToken());
356
357 if (Kind == Expression)
358 return getExpressionAsToken();
359
Sam Kolton945231a2016-06-10 09:57:59 +0000360 return StringRef(Tok.Data, Tok.Length);
361 }
362
363 int64_t getImm() const {
364 assert(isImm());
365 return Imm.Val;
366 }
367
368 enum ImmTy getImmTy() const {
369 assert(isImm());
370 return Imm.Type;
371 }
372
373 unsigned getReg() const override {
374 return Reg.RegNo;
375 }
376
Tom Stellard45bb48e2015-06-13 03:28:10 +0000377 SMLoc getStartLoc() const override {
378 return StartLoc;
379 }
380
381 SMLoc getEndLoc() const override {
382 return EndLoc;
383 }
384
Sam Kolton945231a2016-06-10 09:57:59 +0000385 Modifiers getModifiers() const {
386 assert(isRegKind() || isImmTy(ImmTyNone));
387 return isRegKind() ? Reg.Mods : Imm.Mods;
388 }
389
390 void setModifiers(Modifiers Mods) {
391 assert(isRegKind() || isImmTy(ImmTyNone));
392 if (isRegKind())
393 Reg.Mods = Mods;
394 else
395 Imm.Mods = Mods;
396 }
397
398 bool hasModifiers() const {
399 return getModifiers().hasModifiers();
400 }
401
402 bool hasFPModifiers() const {
403 return getModifiers().hasFPModifiers();
404 }
405
406 bool hasIntModifiers() const {
407 return getModifiers().hasIntModifiers();
408 }
409
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000410 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000411
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000412 void addLiteralImmOperand(MCInst &Inst, int64_t Val) const;
413
414 void addKImmFP32Operands(MCInst &Inst, unsigned N) const;
415
416 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000417
418 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
419 if (isRegKind())
420 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000421 else if (isExpr())
422 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000423 else
424 addImmOperands(Inst, N);
425 }
426
427 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
428 Modifiers Mods = getModifiers();
429 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
430 if (isRegKind()) {
431 addRegOperands(Inst, N);
432 } else {
433 addImmOperands(Inst, N, false);
434 }
435 }
436
437 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
438 assert(!hasIntModifiers());
439 addRegOrImmWithInputModsOperands(Inst, N);
440 }
441
442 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
443 assert(!hasFPModifiers());
444 addRegOrImmWithInputModsOperands(Inst, N);
445 }
446
447 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
448 if (isImm())
449 addImmOperands(Inst, N);
450 else {
451 assert(isExpr());
452 Inst.addOperand(MCOperand::createExpr(Expr));
453 }
454 }
455
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000456 void printImmTy(raw_ostream& OS, ImmTy Type) const {
457 switch (Type) {
458 case ImmTyNone: OS << "None"; break;
459 case ImmTyGDS: OS << "GDS"; break;
460 case ImmTyOffen: OS << "Offen"; break;
461 case ImmTyIdxen: OS << "Idxen"; break;
462 case ImmTyAddr64: OS << "Addr64"; break;
463 case ImmTyOffset: OS << "Offset"; break;
464 case ImmTyOffset0: OS << "Offset0"; break;
465 case ImmTyOffset1: OS << "Offset1"; break;
466 case ImmTyGLC: OS << "GLC"; break;
467 case ImmTySLC: OS << "SLC"; break;
468 case ImmTyTFE: OS << "TFE"; break;
469 case ImmTyClampSI: OS << "ClampSI"; break;
470 case ImmTyOModSI: OS << "OModSI"; break;
471 case ImmTyDppCtrl: OS << "DppCtrl"; break;
472 case ImmTyDppRowMask: OS << "DppRowMask"; break;
473 case ImmTyDppBankMask: OS << "DppBankMask"; break;
474 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000475 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
476 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
477 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000478 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
479 case ImmTyDMask: OS << "DMask"; break;
480 case ImmTyUNorm: OS << "UNorm"; break;
481 case ImmTyDA: OS << "DA"; break;
482 case ImmTyR128: OS << "R128"; break;
483 case ImmTyLWE: OS << "LWE"; break;
484 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000485 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000486 }
487 }
488
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000489 void print(raw_ostream &OS) const override {
490 switch (Kind) {
491 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000492 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000493 break;
494 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000495 OS << '<' << getImm();
496 if (getImmTy() != ImmTyNone) {
497 OS << " type: "; printImmTy(OS, getImmTy());
498 }
Sam Kolton945231a2016-06-10 09:57:59 +0000499 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000500 break;
501 case Token:
502 OS << '\'' << getToken() << '\'';
503 break;
504 case Expression:
505 OS << "<expr " << *Expr << '>';
506 break;
507 }
508 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000509
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000510 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
511 int64_t Val, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000512 enum ImmTy Type = ImmTyNone,
513 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000514 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000515 Op->Imm.Val = Val;
516 Op->Imm.IsFPImm = IsFPImm;
517 Op->Imm.Type = Type;
Sam Kolton945231a2016-06-10 09:57:59 +0000518 Op->Imm.Mods = {false, false, false};
Tom Stellard45bb48e2015-06-13 03:28:10 +0000519 Op->StartLoc = Loc;
520 Op->EndLoc = Loc;
521 return Op;
522 }
523
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000524 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
525 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000526 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000527 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000528 Res->Tok.Data = Str.data();
529 Res->Tok.Length = Str.size();
530 Res->StartLoc = Loc;
531 Res->EndLoc = Loc;
532 return Res;
533 }
534
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000535 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
536 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000537 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000538 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000539 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000540 Op->Reg.RegNo = RegNo;
Sam Kolton945231a2016-06-10 09:57:59 +0000541 Op->Reg.Mods = {false, false, false};
Tom Stellard45bb48e2015-06-13 03:28:10 +0000542 Op->Reg.IsForcedVOP3 = ForceVOP3;
543 Op->StartLoc = S;
544 Op->EndLoc = E;
545 return Op;
546 }
547
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000548 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
549 const class MCExpr *Expr, SMLoc S) {
550 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000551 Op->Expr = Expr;
552 Op->StartLoc = S;
553 Op->EndLoc = S;
554 return Op;
555 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000556};
557
Sam Kolton945231a2016-06-10 09:57:59 +0000558raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
559 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
560 return OS;
561}
562
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000563//===----------------------------------------------------------------------===//
564// AsmParser
565//===----------------------------------------------------------------------===//
566
Tom Stellard45bb48e2015-06-13 03:28:10 +0000567class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000568 const MCInstrInfo &MII;
569 MCAsmParser &Parser;
570
571 unsigned ForcedEncodingSize;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000572 bool ForcedDPP;
573 bool ForcedSDWA;
Matt Arsenault68802d32015-11-05 03:11:27 +0000574
Tom Stellard45bb48e2015-06-13 03:28:10 +0000575 /// @name Auto-generated Match Functions
576 /// {
577
578#define GET_ASSEMBLER_HEADER
579#include "AMDGPUGenAsmMatcher.inc"
580
581 /// }
582
Tom Stellard347ac792015-06-26 21:15:07 +0000583private:
584 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
585 bool ParseDirectiveHSACodeObjectVersion();
586 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000587 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
588 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000589 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000590 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000591 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000592 bool ParseDirectiveAMDGPUHsaModuleGlobal();
593 bool ParseDirectiveAMDGPUHsaProgramGlobal();
594 bool ParseSectionDirectiveHSADataGlobalAgent();
595 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000596 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000597 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
598 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000599 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000600
Tom Stellard45bb48e2015-06-13 03:28:10 +0000601public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000602 enum AMDGPUMatchResultTy {
603 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
604 };
605
Akira Hatanakab11ef082015-11-14 06:35:56 +0000606 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000607 const MCInstrInfo &MII,
608 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000609 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Sam Kolton05ef1c92016-06-03 10:27:37 +0000610 ForcedEncodingSize(0),
611 ForcedDPP(false),
612 ForcedSDWA(false) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000613 MCAsmParserExtension::Initialize(Parser);
614
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000615 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000616 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000617 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000618 }
619
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000620 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Artem Tamazov17091362016-06-14 15:03:59 +0000621
622 {
623 // TODO: make those pre-defined variables read-only.
624 // Currently there is none suitable machinery in the core llvm-mc for this.
625 // MCSymbol::isRedefinable is intended for another purpose, and
626 // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
627 AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
628 MCContext &Ctx = getContext();
629 MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
630 Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
631 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
632 Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
633 Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
634 Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
635 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000636 }
637
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000638 bool isSI() const {
639 return AMDGPU::isSI(getSTI());
640 }
641
642 bool isCI() const {
643 return AMDGPU::isCI(getSTI());
644 }
645
646 bool isVI() const {
647 return AMDGPU::isVI(getSTI());
648 }
649
650 bool hasSGPR102_SGPR103() const {
651 return !isVI();
652 }
653
Tom Stellard347ac792015-06-26 21:15:07 +0000654 AMDGPUTargetStreamer &getTargetStreamer() {
655 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
656 return static_cast<AMDGPUTargetStreamer &>(TS);
657 }
Matt Arsenault37fefd62016-06-10 02:18:02 +0000658
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000659 const MCRegisterInfo *getMRI() const {
660 // We need this const_cast because for some reason getContext() is not const
661 // in MCAsmParser.
662 return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
663 }
664
665 const MCInstrInfo *getMII() const {
666 return &MII;
667 }
668
Sam Kolton05ef1c92016-06-03 10:27:37 +0000669 void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
670 void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
671 void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }
Tom Stellard347ac792015-06-26 21:15:07 +0000672
Sam Kolton05ef1c92016-06-03 10:27:37 +0000673 unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
674 bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
675 bool isForcedDPP() const { return ForcedDPP; }
676 bool isForcedSDWA() const { return ForcedSDWA; }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000677
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000678 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000679 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
680 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +0000681 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
682 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000683 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
684 OperandVector &Operands, MCStreamer &Out,
685 uint64_t &ErrorInfo,
686 bool MatchingInlineAsm) override;
687 bool ParseDirective(AsmToken DirectiveID) override;
688 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000689 StringRef parseMnemonicSuffix(StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000690 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
691 SMLoc NameLoc, OperandVector &Operands) override;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000692 //bool ProcessInstruction(MCInst &Inst);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000693
Sam Kolton11de3702016-05-24 12:38:33 +0000694 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000695 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
696 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000697 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000698 bool (*ConvertResult)(int64_t&) = 0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000699 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +0000700 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Sam Kolton05ef1c92016-06-03 10:27:37 +0000701 OperandMatchResultTy parseStringWithPrefix(StringRef Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000702
Sam Kolton1bdcef72016-05-23 09:59:02 +0000703 OperandMatchResultTy parseImm(OperandVector &Operands);
704 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000705 OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
706 OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
Sam Kolton1bdcef72016-05-23 09:59:02 +0000707
Tom Stellard45bb48e2015-06-13 03:28:10 +0000708 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
709 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000710
711 bool parseCnt(int64_t &IntVal);
712 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000713 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000714
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000715private:
716 struct OperandInfoTy {
717 int64_t Id;
718 bool IsSymbolic;
719 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
720 };
Sam Kolton11de3702016-05-24 12:38:33 +0000721
Artem Tamazov6edc1352016-05-26 17:00:33 +0000722 bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
723 bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000724public:
Sam Kolton11de3702016-05-24 12:38:33 +0000725 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
726
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000727 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000728 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
729
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000730 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
731 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
732 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000733 AMDGPUOperand::Ptr defaultGLC() const;
734 AMDGPUOperand::Ptr defaultSLC() const;
735 AMDGPUOperand::Ptr defaultTFE() const;
736
Sam Kolton5f10a132016-05-06 11:31:17 +0000737 AMDGPUOperand::Ptr defaultDMask() const;
738 AMDGPUOperand::Ptr defaultUNorm() const;
739 AMDGPUOperand::Ptr defaultDA() const;
740 AMDGPUOperand::Ptr defaultR128() const;
741 AMDGPUOperand::Ptr defaultLWE() const;
742 AMDGPUOperand::Ptr defaultSMRDOffset() const;
743 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
Matt Arsenault37fefd62016-06-10 02:18:02 +0000744
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000745 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
746
Tom Stellarda90b9522016-02-11 03:28:15 +0000747 void cvtId(MCInst &Inst, const OperandVector &Operands);
748 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000749 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000750
751 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000752 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000753
Sam Kolton11de3702016-05-24 12:38:33 +0000754 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000755 AMDGPUOperand::Ptr defaultRowMask() const;
756 AMDGPUOperand::Ptr defaultBankMask() const;
757 AMDGPUOperand::Ptr defaultBoundCtrl() const;
758 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000759
Sam Kolton05ef1c92016-06-03 10:27:37 +0000760 OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
761 AMDGPUOperand::ImmTy Type);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000762 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton945231a2016-06-10 09:57:59 +0000763 void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
764 void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5196b882016-07-01 09:59:21 +0000765 void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
766 void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
767 uint64_t BasicInstType);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000768};
769
// Table entry describing an optional instruction operand (e.g. offset, glc,
// clamp) that may be omitted in the assembly text: its mnemonic, the
// immediate kind it is parsed into, whether it is a bare 0/1 flag, and an
// optional callback that validates/converts the parsed value in place.
struct OptionalOperand {
  const char *Name;                // assembly mnemonic of the operand
  AMDGPUOperand::ImmTy Type;       // immediate kind used to represent it
  bool IsBit;                      // true if operand is a single-bit flag
  bool (*ConvertResult)(int64_t&); // optional fixup; returns success
};
776
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000777}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000778
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000779//===----------------------------------------------------------------------===//
780// Operand
781//===----------------------------------------------------------------------===//
782
// Return true if this immediate can be encoded as an inline constant for an
// operand of the given value type (32- vs 64-bit width, fp vs int).
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val, AsmParser->isVI());
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      APFloat::opStatus status = FPLiteral.convert(APFloat::IEEEsingle,
                                                   APFloat::rmNearestTiesToEven,
                                                   &lost);
      // We allow precision lost but not overflow or underflow
      if (status != APFloat::opOK &&
          lost &&
          ((status & APFloat::opOverflow)  != 0 ||
           (status & APFloat::opUnderflow) != 0)) {
        return false;
      }
      // Check if single precision literal is inlinable
      return AMDGPU::isInlinableLiteral32(
        static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->isVI());
    }
  } else { // We got int literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      return AMDGPU::isInlinableLiteral64(Imm.Val, AsmParser->isVI());
    } else { // Expected 32-bit operand
      return AMDGPU::isInlinableLiteral32(
          static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
          AsmParser->isVI());
    }
  }
  // Not reachable: every branch above returns.
  return false;
}
827
// Return true if this immediate can be encoded as a trailing 32-bit literal
// dword for an operand of the given value type (the non-inline encoding;
// see isInlinableImm for the inline-constant predicate).
bool AMDGPUOperand::isLiteralImm(MVT type) const {
  // Check that this immediate can be added as literal
  if (!isImmTy(ImmTyNone)) {
    return false;
  }

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64) { // Expected 64-bit fp operand
      // We would set low 64-bits of literal to zeroes but we accept this literals
      return true;
    } else if (type == MVT::i64) { // Expected 64-bit int operand
      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them.
      return false;
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      APFloat::opStatus status = FPLiteral.convert(APFloat::IEEEsingle,
                                                   APFloat::rmNearestTiesToEven,
                                                   &lost);
      // We allow precision lost but not overflow or underflow
      if (status != APFloat::opOK &&
          lost &&
          ((status & APFloat::opOverflow)  != 0 ||
           (status & APFloat::opUnderflow) != 0)) {
        return false;
      }
      return true;
    }
  } else { // We got int literal token
    APInt HiBits = Literal.getHiBits(32);
    if (HiBits == 0xffffffff &&
        (*Literal.getLoBits(32).getRawData() & 0x80000000) != 0) {
      // If high 32 bits aren't zeroes then they all should be ones and 32nd
      // bit should be set. So that this 64-bit literal is sign-extension of
      // 32-bit value.
      return true;
    } else if (HiBits == 0) {
      return true;
    }
  }
  return false;
}
874
875bool AMDGPUOperand::isRegClass(unsigned RCID) const {
876 return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
877}
878
// Append this immediate to Inst as operand N. When ApplyModifiers is set,
// a neg fp-modifier is folded directly into the value (the only modifier
// that can reach this path). Values destined for SI source operands are
// routed through inline-constant/literal encoding.
void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
  int64_t Val = Imm.Val;
  if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers() && Imm.Mods.Neg) {
    // Apply modifiers to immediate value. Only negate can get here
    if (Imm.IsFPImm) {
      // Negate the fp bit pattern, not the integer representation.
      APFloat F(BitsToDouble(Val));
      F.changeSign();
      Val = F.bitcastToAPInt().getZExtValue();
    } else {
      Val = -Val;
    }
  }

  // Source operands may need the inline-constant vs literal decision;
  // everything else is emitted as a plain immediate.
  if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()), Inst.getNumOperands())) {
    addLiteralImmOperand(Inst, Val);
  } else {
    Inst.addOperand(MCOperand::createImm(Val));
  }
}
898
// Append Val to Inst as a source operand, using the inline-constant
// encoding when the value qualifies and falling back to a 32-bit literal
// dword otherwise. Assumes the predicate methods (isLiteralImm /
// isInlinableImm) have already accepted this value for the operand.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  APInt Literal(64, Val);
  auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), InstDesc, OpNum); // expected operand size

  if (Imm.IsFPImm) { // We got fp literal token
    if (OpSize == 8) { // Expected 64-bit operand
      // Check if literal is inlinable
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(), AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
      } else if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
                          "Can't encode literal as exact 64-bit"
                          " floating-point operand. Low 32-bits will be"
                          " set to zero");
        }
        // Only the high 32 bits of the double survive in the literal dword.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
      } else {
        // We don't allow fp literals in 64-bit integer instructions. It is
        // unclear how we should encode them. This case should be checked earlier
        // in predicate methods (isLiteralImm())
        llvm_unreachable("fp literal in 64-bit integer instruction.");
      }
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()
      Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
    }
  } else { // We got int literal token
    if (OpSize == 8) { // Expected 64-bit operand
      auto LiteralVal = Literal.getZExtValue();
      if (AMDGPU::isInlinableLiteral64(LiteralVal, AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    } else { // Expected 32-bit operand
      auto LiteralVal = static_cast<int32_t>(Literal.getLoBits(32).getZExtValue());
      if (AMDGPU::isInlinableLiteral32(LiteralVal, AsmParser->isVI())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    }
    // Not inlinable: emit the low 32 bits as a literal dword.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
  }
}
954
955void AMDGPUOperand::addKImmFP32Operands(MCInst &Inst, unsigned N) const {
956 APInt Literal(64, Imm.Val);
957 if (Imm.IsFPImm) { // We got fp literal
958 bool lost;
959 APFloat FPLiteral(APFloat::IEEEdouble, Literal);
960 FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
961 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
962 } else { // We got int literal token
963 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
964 }
965}
966
967void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
968 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
969}
970
971//===----------------------------------------------------------------------===//
972// AsmParser
973//===----------------------------------------------------------------------===//
974
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000975static int getRegClass(RegisterKind Is, unsigned RegWidth) {
976 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000977 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000978 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000979 case 1: return AMDGPU::VGPR_32RegClassID;
980 case 2: return AMDGPU::VReg_64RegClassID;
981 case 3: return AMDGPU::VReg_96RegClassID;
982 case 4: return AMDGPU::VReg_128RegClassID;
983 case 8: return AMDGPU::VReg_256RegClassID;
984 case 16: return AMDGPU::VReg_512RegClassID;
985 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000986 } else if (Is == IS_TTMP) {
987 switch (RegWidth) {
988 default: return -1;
989 case 1: return AMDGPU::TTMP_32RegClassID;
990 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000991 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000992 }
993 } else if (Is == IS_SGPR) {
994 switch (RegWidth) {
995 default: return -1;
996 case 1: return AMDGPU::SGPR_32RegClassID;
997 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000998 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000999 case 8: return AMDGPU::SReg_256RegClassID;
1000 case 16: return AMDGPU::SReg_512RegClassID;
1001 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001002 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001003 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001004}
1005
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001006static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001007 return StringSwitch<unsigned>(RegName)
1008 .Case("exec", AMDGPU::EXEC)
1009 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001010 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001011 .Case("m0", AMDGPU::M0)
1012 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001013 .Case("tba", AMDGPU::TBA)
1014 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001015 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1016 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001017 .Case("vcc_lo", AMDGPU::VCC_LO)
1018 .Case("vcc_hi", AMDGPU::VCC_HI)
1019 .Case("exec_lo", AMDGPU::EXEC_LO)
1020 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001021 .Case("tma_lo", AMDGPU::TMA_LO)
1022 .Case("tma_hi", AMDGPU::TMA_HI)
1023 .Case("tba_lo", AMDGPU::TBA_LO)
1024 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001025 .Default(0);
1026}
1027
1028bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001029 auto R = parseRegister();
1030 if (!R) return true;
1031 assert(R->isReg());
1032 RegNo = R->getReg();
1033 StartLoc = R->getStartLoc();
1034 EndLoc = R->getEndLoc();
1035 return false;
1036}
1037
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001038bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
1039{
1040 switch (RegKind) {
1041 case IS_SPECIAL:
1042 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
1043 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
1044 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
1045 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
1046 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
1047 return false;
1048 case IS_VGPR:
1049 case IS_SGPR:
1050 case IS_TTMP:
1051 if (Reg1 != Reg + RegWidth) { return false; }
1052 RegWidth++;
1053 return true;
1054 default:
1055 assert(false); return false;
1056 }
1057}
1058
// Parse one register reference in any of the supported forms:
//   * special registers: vcc, exec, m0, tba_lo, ...
//   * single numbered register: v0, s17, ttmp3
//   * range: v[8:11], s[4:7]; "s[4]" (no colon) is a single register
//   * list of consecutive registers: [s0,s1,s2,s3] (parsed recursively)
// On success, fills RegKind / Reg (resolved MC register) / RegNum /
// RegWidth (in 32-bit units) and returns true; returns false on any
// syntax or range error.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: 'v', 's', or 'ttmp'; RegNumIndex is where the
      // numeric part (if any) begins.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // After the low bound, either ']' (single register) or ':' (range).
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        // Each list element must be a single register of the same kind,
        // consecutive with the span accumulated so far.
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Resolve the (kind, number, width) triple to an actual MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    // Convert the dword index to an index within the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  // Reject registers that don't exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1184
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001185std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001186 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001187 SMLoc StartLoc = Tok.getLoc();
1188 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001189 RegisterKind RegKind;
1190 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001191
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001192 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
1193 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001194 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001195 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001196}
1197
// Parse an integer or floating-point immediate, with an optional leading
// '-', and append it to Operands. Returns NoMatch only when no '-' was
// consumed and the next token cannot start an immediate.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  // TODO: add syntactic sugar for 1/(2*PI)
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // For Real tokens the parsed int64 carries the double's bit pattern.
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F(BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    // IsFPImm = true so later encoding treats the bits as a double.
    Operands.push_back(
        AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    // A consumed '-' with no immediate after it is a hard parse failure.
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}
1235
1236AMDGPUAsmParser::OperandMatchResultTy
1237AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1238 auto res = parseImm(Operands);
1239 if (res != MatchOperand_NoMatch) {
1240 return res;
1241 }
1242
1243 if (auto R = parseRegister()) {
1244 assert(R->isReg());
1245 R->Reg.IsForcedVOP3 = isForcedVOP3();
1246 Operands.push_back(std::move(R));
1247 return MatchOperand_Success;
1248 }
1249 return MatchOperand_ParseFail;
1250}
1251
// Parse a source operand (register or immediate) with optional floating
// point input modifiers: "-x" (neg) and "|x|" or "abs(x)" (abs). The two
// abs spellings are mutually exclusive. Modifiers are attached to the
// parsed operand afterwards.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // Function-style "abs(...)" spelling.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // Pipe "|...|" spelling; cannot be combined with abs(...).
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Negate) {
    Mods.Neg = true;
  }
  // Consume the closing delimiter of whichever abs spelling was opened.
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  // Attach the collected modifiers to the operand just parsed.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1315
// Parse a source operand (register or immediate) with an optional integer
// input modifier: "sext(x)" (sign extension). The modifier is attached to
// the parsed operand afterwards.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  bool Sext = false;

  // Function-style "sext(...)" spelling.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods = {false, false, false};
  if (Sext) {
    // Consume the closing ')' of sext(...).
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  // Attach the collected modifiers to the operand just parsed.
  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001351
Tom Stellard45bb48e2015-06-13 03:28:10 +00001352unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
1353
1354 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1355
1356 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00001357 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
1358 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
1359 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00001360 return Match_InvalidOperand;
1361
Tom Stellard88e0b252015-10-06 15:57:53 +00001362 if ((TSFlags & SIInstrFlags::VOP3) &&
1363 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
1364 getForcedEncodingSize() != 64)
1365 return Match_PreferE32;
1366
Tom Stellard45bb48e2015-06-13 03:28:10 +00001367 return Match_Success;
1368}
1369
// Top-level match-and-emit hook: try every assembler variant permitted by
// the forced-encoding suffix (if any), keep the most specific failure
// status across variants, and emit the instruction on the first successful
// match. Returns true and reports a diagnostic on failure.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  // What asm variants we should check
  std::vector<unsigned> MatchedVariants;
  if (getForcedEncodingSize() == 32) {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
  } else if (isForcedVOP3()) {
    MatchedVariants = {AMDGPUAsmVariants::VOP3};
  } else if (isForcedSDWA()) {
    MatchedVariants = {AMDGPUAsmVariants::SDWA};
  } else if (isForcedDPP()) {
    MatchedVariants = {AMDGPUAsmVariants::DPP};
  } else {
    // No forced encoding: try all variants in order.
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
                       AMDGPUAsmVariants::VOP3,
                       AMDGPUAsmVariants::SDWA,
                       AMDGPUAsmVariants::DPP};
  }

  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : MatchedVariants) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  // Report the most specific status collected across all tried variants.
  switch (Result) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when we know it.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1448
Tom Stellard347ac792015-06-26 21:15:07 +00001449bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1450 uint32_t &Minor) {
1451 if (getLexer().isNot(AsmToken::Integer))
1452 return TokError("invalid major version");
1453
1454 Major = getLexer().getTok().getIntVal();
1455 Lex();
1456
1457 if (getLexer().isNot(AsmToken::Comma))
1458 return TokError("minor version number required, comma expected");
1459 Lex();
1460
1461 if (getLexer().isNot(AsmToken::Integer))
1462 return TokError("invalid minor version");
1463
1464 Minor = getLexer().getTok().getIntVal();
1465 Lex();
1466
1467 return false;
1468}
1469
1470bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1471
1472 uint32_t Major;
1473 uint32_t Minor;
1474
1475 if (ParseDirectiveMajorMinor(Major, Minor))
1476 return true;
1477
1478 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1479 return false;
1480}
1481
// Handle the .hsa_code_object_isa directive. With no arguments, emits the
// ISA version of the targeted GPU; otherwise parses
// "major, minor, stepping, "vendor", "arch"" and forwards it to the target
// streamer. Returns true (with a diagnostic) on malformed input.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1538
Tom Stellardff7416b2015-06-26 21:58:31 +00001539bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1540 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001541 SmallString<40> ErrStr;
1542 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00001543 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001544 return TokError(Err.str());
1545 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001546 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001547 return false;
1548}
1549
1550bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
1551
1552 amd_kernel_code_t Header;
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001553 AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +00001554
1555 while (true) {
1556
Tom Stellardff7416b2015-06-26 21:58:31 +00001557 // Lex EndOfStatement. This is in a while loop, because lexing a comment
1558 // will set the current token to EndOfStatement.
1559 while(getLexer().is(AsmToken::EndOfStatement))
1560 Lex();
1561
1562 if (getLexer().isNot(AsmToken::Identifier))
1563 return TokError("expected value identifier or .end_amd_kernel_code_t");
1564
1565 StringRef ID = getLexer().getTok().getIdentifier();
1566 Lex();
1567
1568 if (ID == ".end_amd_kernel_code_t")
1569 break;
1570
1571 if (ParseAMDKernelCodeTValue(ID, Header))
1572 return true;
1573 }
1574
1575 getTargetStreamer().EmitAMDKernelCodeT(Header);
1576
1577 return false;
1578}
1579
Tom Stellarde135ffd2015-09-25 21:41:28 +00001580bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1581 getParser().getStreamer().SwitchSection(
1582 AMDGPU::getHSATextSection(getContext()));
1583 return false;
1584}
1585
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001586bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1587 if (getLexer().isNot(AsmToken::Identifier))
1588 return TokError("expected symbol name");
1589
1590 StringRef KernelName = Parser.getTok().getString();
1591
1592 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1593 ELF::STT_AMDGPU_HSA_KERNEL);
1594 Lex();
1595 return false;
1596}
1597
Tom Stellard00f2f912015-12-02 19:47:57 +00001598bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1599 if (getLexer().isNot(AsmToken::Identifier))
1600 return TokError("expected symbol name");
1601
1602 StringRef GlobalName = Parser.getTok().getIdentifier();
1603
1604 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1605 Lex();
1606 return false;
1607}
1608
1609bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1610 if (getLexer().isNot(AsmToken::Identifier))
1611 return TokError("expected symbol name");
1612
1613 StringRef GlobalName = Parser.getTok().getIdentifier();
1614
1615 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1616 Lex();
1617 return false;
1618}
1619
1620bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1621 getParser().getStreamer().SwitchSection(
1622 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1623 return false;
1624}
1625
1626bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1627 getParser().getStreamer().SwitchSection(
1628 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1629 return false;
1630}
1631
Tom Stellard9760f032015-12-03 03:34:32 +00001632bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1633 getParser().getStreamer().SwitchSection(
1634 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1635 return false;
1636}
1637
Tom Stellard45bb48e2015-06-13 03:28:10 +00001638bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001639 StringRef IDVal = DirectiveID.getString();
1640
1641 if (IDVal == ".hsa_code_object_version")
1642 return ParseDirectiveHSACodeObjectVersion();
1643
1644 if (IDVal == ".hsa_code_object_isa")
1645 return ParseDirectiveHSACodeObjectISA();
1646
Tom Stellardff7416b2015-06-26 21:58:31 +00001647 if (IDVal == ".amd_kernel_code_t")
1648 return ParseDirectiveAMDKernelCodeT();
1649
Tom Stellardfcfaea42016-05-05 17:03:33 +00001650 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001651 return ParseSectionDirectiveHSAText();
1652
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001653 if (IDVal == ".amdgpu_hsa_kernel")
1654 return ParseDirectiveAMDGPUHsaKernel();
1655
Tom Stellard00f2f912015-12-02 19:47:57 +00001656 if (IDVal == ".amdgpu_hsa_module_global")
1657 return ParseDirectiveAMDGPUHsaModuleGlobal();
1658
1659 if (IDVal == ".amdgpu_hsa_program_global")
1660 return ParseDirectiveAMDGPUHsaProgramGlobal();
1661
1662 if (IDVal == ".hsadata_global_agent")
1663 return ParseSectionDirectiveHSADataGlobalAgent();
1664
1665 if (IDVal == ".hsadata_global_program")
1666 return ParseSectionDirectiveHSADataGlobalProgram();
1667
Tom Stellard9760f032015-12-03 03:34:32 +00001668 if (IDVal == ".hsarodata_readonly_agent")
1669 return ParseSectionDirectiveHSARodataReadonlyAgent();
1670
Tom Stellard45bb48e2015-06-13 03:28:10 +00001671 return true;
1672}
1673
Matt Arsenault68802d32015-11-05 03:11:27 +00001674bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1675 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001676 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001677 return true;
1678
Matt Arsenault3b159672015-12-01 20:31:08 +00001679 if (isSI()) {
1680 // No flat_scr
1681 switch (RegNo) {
1682 case AMDGPU::FLAT_SCR:
1683 case AMDGPU::FLAT_SCR_LO:
1684 case AMDGPU::FLAT_SCR_HI:
1685 return false;
1686 default:
1687 return true;
1688 }
1689 }
1690
Matt Arsenault68802d32015-11-05 03:11:27 +00001691 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1692 // SI/CI have.
1693 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1694 R.isValid(); ++R) {
1695 if (*R == RegNo)
1696 return false;
1697 }
1698
1699 return true;
1700}
1701
/// Parse a single operand of instruction \p Mnemonic into \p Operands.
/// Tries, in order: the generated custom operand parsers, register/immediate
/// parsing, and finally an expression/token fallback for identifiers.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Next, try a plain register or immediate.
  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treat the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: fall back to a raw token operand.
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1743
Sam Kolton05ef1c92016-06-03 10:27:37 +00001744StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1745 // Clear any forced encodings from the previous instruction.
1746 setForcedEncodingSize(0);
1747 setForcedDPP(false);
1748 setForcedSDWA(false);
1749
1750 if (Name.endswith("_e64")) {
1751 setForcedEncodingSize(64);
1752 return Name.substr(0, Name.size() - 4);
1753 } else if (Name.endswith("_e32")) {
1754 setForcedEncodingSize(32);
1755 return Name.substr(0, Name.size() - 4);
1756 } else if (Name.endswith("_dpp")) {
1757 setForcedDPP(true);
1758 return Name.substr(0, Name.size() - 4);
1759 } else if (Name.endswith("_sdwa")) {
1760 setForcedSDWA(true);
1761 return Name.substr(0, Name.size() - 5);
1762 }
1763 return Name;
1764}
1765
Tom Stellard45bb48e2015-06-13 03:28:10 +00001766bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1767 StringRef Name,
1768 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001769 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00001770 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001771 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00001772
Tom Stellard45bb48e2015-06-13 03:28:10 +00001773 while (!getLexer().is(AsmToken::EndOfStatement)) {
1774 AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1775
1776 // Eat the comma or space if there is one.
1777 if (getLexer().is(AsmToken::Comma))
1778 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00001779
Tom Stellard45bb48e2015-06-13 03:28:10 +00001780 switch (Res) {
1781 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001782 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001783 Error(getLexer().getLoc(), "failed parsing operand.");
1784 while (!getLexer().is(AsmToken::EndOfStatement)) {
1785 Parser.Lex();
1786 }
1787 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00001788 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00001789 Error(getLexer().getLoc(), "not a valid operand.");
1790 while (!getLexer().is(AsmToken::EndOfStatement)) {
1791 Parser.Lex();
1792 }
1793 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001794 }
1795 }
1796
Tom Stellard45bb48e2015-06-13 03:28:10 +00001797 return false;
1798}
1799
1800//===----------------------------------------------------------------------===//
1801// Utility functions
1802//===----------------------------------------------------------------------===//
1803
1804AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001805AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001806 switch(getLexer().getKind()) {
1807 default: return MatchOperand_NoMatch;
1808 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001809 StringRef Name = Parser.getTok().getString();
1810 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001811 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001812 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001813
1814 Parser.Lex();
1815 if (getLexer().isNot(AsmToken::Colon))
1816 return MatchOperand_ParseFail;
1817
1818 Parser.Lex();
1819 if (getLexer().isNot(AsmToken::Integer))
1820 return MatchOperand_ParseFail;
1821
1822 if (getParser().parseAbsoluteExpression(Int))
1823 return MatchOperand_ParseFail;
1824 break;
1825 }
1826 }
1827 return MatchOperand_Success;
1828}
1829
1830AMDGPUAsmParser::OperandMatchResultTy
1831AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001832 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001833 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001834
1835 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001836 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001837
Sam Kolton11de3702016-05-24 12:38:33 +00001838 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001839 if (Res != MatchOperand_Success)
1840 return Res;
1841
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001842 if (ConvertResult && !ConvertResult(Value)) {
1843 return MatchOperand_ParseFail;
1844 }
1845
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001846 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001847 return MatchOperand_Success;
1848}
1849
1850AMDGPUAsmParser::OperandMatchResultTy
1851AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001852 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001853 int64_t Bit = 0;
1854 SMLoc S = Parser.getTok().getLoc();
1855
1856 // We are at the end of the statement, and this is a default argument, so
1857 // use a default value.
1858 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1859 switch(getLexer().getKind()) {
1860 case AsmToken::Identifier: {
1861 StringRef Tok = Parser.getTok().getString();
1862 if (Tok == Name) {
1863 Bit = 1;
1864 Parser.Lex();
1865 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1866 Bit = 0;
1867 Parser.Lex();
1868 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001869 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001870 }
1871 break;
1872 }
1873 default:
1874 return MatchOperand_NoMatch;
1875 }
1876 }
1877
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001878 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001879 return MatchOperand_Success;
1880}
1881
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001882typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1883
Sam Koltona74cd522016-03-18 15:35:51 +00001884void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1885 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001886 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001887 auto i = OptionalIdx.find(ImmT);
1888 if (i != OptionalIdx.end()) {
1889 unsigned Idx = i->second;
1890 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1891 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001892 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001893 }
1894}
1895
Matt Arsenault37fefd62016-06-10 02:18:02 +00001896AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00001897AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00001898 if (getLexer().isNot(AsmToken::Identifier)) {
1899 return MatchOperand_NoMatch;
1900 }
1901 StringRef Tok = Parser.getTok().getString();
1902 if (Tok != Prefix) {
1903 return MatchOperand_NoMatch;
1904 }
1905
1906 Parser.Lex();
1907 if (getLexer().isNot(AsmToken::Colon)) {
1908 return MatchOperand_ParseFail;
1909 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001910
Sam Kolton3025e7f2016-04-26 13:33:56 +00001911 Parser.Lex();
1912 if (getLexer().isNot(AsmToken::Identifier)) {
1913 return MatchOperand_ParseFail;
1914 }
1915
1916 Value = Parser.getTok().getString();
1917 return MatchOperand_Success;
1918}
1919
Tom Stellard45bb48e2015-06-13 03:28:10 +00001920//===----------------------------------------------------------------------===//
1921// ds
1922//===----------------------------------------------------------------------===//
1923
Tom Stellard45bb48e2015-06-13 03:28:10 +00001924void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1925 const OperandVector &Operands) {
1926
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001927 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001928
1929 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1930 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1931
1932 // Add the register arguments
1933 if (Op.isReg()) {
1934 Op.addRegOperands(Inst, 1);
1935 continue;
1936 }
1937
1938 // Handle optional arguments
1939 OptionalIdx[Op.getImmTy()] = i;
1940 }
1941
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001942 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1943 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001944 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001945
Tom Stellard45bb48e2015-06-13 03:28:10 +00001946 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1947}
1948
1949void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1950
1951 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1952 bool GDSOnly = false;
1953
1954 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1955 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1956
1957 // Add the register arguments
1958 if (Op.isReg()) {
1959 Op.addRegOperands(Inst, 1);
1960 continue;
1961 }
1962
1963 if (Op.isToken() && Op.getToken() == "gds") {
1964 GDSOnly = true;
1965 continue;
1966 }
1967
1968 // Handle optional arguments
1969 OptionalIdx[Op.getImmTy()] = i;
1970 }
1971
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001972 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1973 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001974
1975 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001976 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001977 }
1978 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1979}
1980
1981
1982//===----------------------------------------------------------------------===//
1983// s_waitcnt
1984//===----------------------------------------------------------------------===//
1985
1986bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1987 StringRef CntName = Parser.getTok().getString();
1988 int64_t CntVal;
1989
1990 Parser.Lex();
1991 if (getLexer().isNot(AsmToken::LParen))
1992 return true;
1993
1994 Parser.Lex();
1995 if (getLexer().isNot(AsmToken::Integer))
1996 return true;
1997
1998 if (getParser().parseAbsoluteExpression(CntVal))
1999 return true;
2000
2001 if (getLexer().isNot(AsmToken::RParen))
2002 return true;
2003
2004 Parser.Lex();
2005 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
2006 Parser.Lex();
2007
2008 int CntShift;
2009 int CntMask;
2010
2011 if (CntName == "vmcnt") {
2012 CntMask = 0xf;
2013 CntShift = 0;
2014 } else if (CntName == "expcnt") {
2015 CntMask = 0x7;
2016 CntShift = 4;
2017 } else if (CntName == "lgkmcnt") {
Tom Stellard3d2c8522016-01-28 17:13:44 +00002018 CntMask = 0xf;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002019 CntShift = 8;
2020 } else {
2021 return true;
2022 }
2023
2024 IntVal &= ~(CntMask << CntShift);
2025 IntVal |= (CntVal << CntShift);
2026 return false;
2027}
2028
2029AMDGPUAsmParser::OperandMatchResultTy
2030AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
2031 // Disable all counters by default.
2032 // vmcnt [3:0]
2033 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00002034 // lgkmcnt [11:8]
2035 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002036 SMLoc S = Parser.getTok().getLoc();
2037
2038 switch(getLexer().getKind()) {
2039 default: return MatchOperand_ParseFail;
2040 case AsmToken::Integer:
2041 // The operand can be an integer value.
2042 if (getParser().parseAbsoluteExpression(CntVal))
2043 return MatchOperand_ParseFail;
2044 break;
2045
2046 case AsmToken::Identifier:
2047 do {
2048 if (parseCnt(CntVal))
2049 return MatchOperand_ParseFail;
2050 } while(getLexer().isNot(AsmToken::EndOfStatement));
2051 break;
2052 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002053 Operands.push_back(AMDGPUOperand::CreateImm(this, CntVal, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002054 return MatchOperand_Success;
2055}
2056
/// Parse a "hwreg(<id>[, <offset>, <width>])" construct. The register may be
/// given symbolically or as an integer. Returns true on a hard parse error;
/// false on success (range validation of the parsed values is done by the
/// caller, parseHwreg).
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: look it up in the IdSymbolic table. An unknown
    // name leaves Id as ID_UNKNOWN_ for the caller to diagnose.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: hwreg(<id>) with no offset/width.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2117
/// Parse an s_getreg/s_setreg hwreg operand: either a raw 16-bit integer or
/// a hwreg(...) construct. Out-of-range values produce diagnostics but still
/// yield an immediate operand so parsing can continue.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;

  case AsmToken::Identifier: {
      OperandInfoTy HwReg(ID_UNKNOWN_);
      int64_t Offset = OFFSET_DEFAULT_;
      int64_t Width = WIDTH_M1_DEFAULT_ + 1;
      if (parseHwregConstruct(HwReg, Offset, Width))
        return MatchOperand_ParseFail;
      // Validate each field; errors are reported but, as above, parsing
      // proceeds and an immediate operand is still produced.
      if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
        if (HwReg.IsSymbolic)
          Error(S, "invalid symbolic name of hardware register");
        else
          Error(S, "invalid code of hardware register: only 6-bit values are legal");
      }
      if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
        Error(S, "invalid bit offset: only 5-bit values are legal");
      if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
        Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
      // Pack id/offset/(width-1) into the 16-bit encoding.
      Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
2161
// An s_waitcnt operand is any plain immediate.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

// A hwreg operand is an immediate tagged with ImmTyHwreg.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
2169
/// Parse a "sendmsg(<msg>[, <op>[, <stream>]])" construct into \p Msg,
/// \p Operation and \p StreamId. Message and operation may be symbolic or
/// numeric. Returns true on a hard parse error; false otherwise — including
/// when the message id could not be resolved (validation and diagnostics are
/// the caller's job, see parseSendMsgOp).
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message name: scan the IdSymbolic table, skipping ids that
    // have no symbolic name (gaps in the enumeration).
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
      default: continue; // Omit gaps.
      case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric message id.
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Messages other than GS/GS_DONE/SYSMSG take no further arguments.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  // Operation id: symbolic names come from the GS or SYSMSG table depending
  // on the message.
  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
2265
/// Parse an s_sendmsg operand: either a raw 16-bit integer or a sendmsg(...)
/// construct. Validates message/operation/stream ids; on a validation failure
/// a diagnostic is emitted and encoding stops (via the do/while-break), but
/// an immediate operand is still created so parsing can continue.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      // do/while(0): each failed validation breaks out, leaving Imm16Val
      // with whatever fields were encoded so far.
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (0);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2344
// Returns true if this operand was parsed as a sendmsg(...) immediate
// (ImmTySendMsg); used by the generated matcher to classify s_sendmsg operands.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
2348
Tom Stellard45bb48e2015-06-13 03:28:10 +00002349//===----------------------------------------------------------------------===//
2350// sopp branch targets
2351//===----------------------------------------------------------------------===//
2352
2353AMDGPUAsmParser::OperandMatchResultTy
2354AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2355 SMLoc S = Parser.getTok().getLoc();
2356
2357 switch (getLexer().getKind()) {
2358 default: return MatchOperand_ParseFail;
2359 case AsmToken::Integer: {
2360 int64_t Imm;
2361 if (getParser().parseAbsoluteExpression(Imm))
2362 return MatchOperand_ParseFail;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002363 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002364 return MatchOperand_Success;
2365 }
2366
2367 case AsmToken::Identifier:
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002368 Operands.push_back(AMDGPUOperand::CreateExpr(this,
Tom Stellard45bb48e2015-06-13 03:28:10 +00002369 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2370 Parser.getTok().getString()), getContext()), S));
2371 Parser.Lex();
2372 return MatchOperand_Success;
2373 }
2374}
2375
2376//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002377// mubuf
2378//===----------------------------------------------------------------------===//
2379
Sam Kolton5f10a132016-05-06 11:31:17 +00002380AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002381 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00002382}
2383
2384AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002385 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00002386}
2387
2388AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002389 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
Sam Kolton5f10a132016-05-06 11:31:17 +00002390}
2391
// Convert parsed MUBUF operands into MCInst operands.
// Registers and the literal soffset are emitted in parse order; named
// modifiers are collected into OptionalIdx and appended afterwards in the
// fixed order the instruction encoding expects (offset, glc, slc, tfe).
// IsAtomic suppresses the glc operand (hard-coded in atomic encodings);
// IsAtomicReturn additionally duplicates $vdata_in as the $vdata def.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                            const OperandVector &Operands,
                            bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  // A "return" atomic is always an atomic; the converse need not hold.
  assert(IsAtomicReturn ? IsAtomic : true);

  // Operands[0] is the instruction mnemonic token; start at 1.
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments: remember where each named modifier was
    // parsed so it can be emitted in encoding order below.
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2437
2438//===----------------------------------------------------------------------===//
2439// mimg
2440//===----------------------------------------------------------------------===//
2441
// Convert parsed MIMG operands into MCInst operands.
// Defs come first (in parse order), then source registers/immediates; named
// image modifiers are collected and appended afterwards in the fixed order
// the MIMG encoding expects.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Named modifier (dmask, unorm, ...): emit later in encoding order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false); // Parser should not produce any other operand kind here.
    }
  }

  // Modifier emission order is fixed by the MIMG operand layout.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2474
2475void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2476 unsigned I = 1;
2477 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2478 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2479 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2480 }
2481
2482 // Add src, same as dst
2483 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2484
2485 OptionalImmIndexMap OptionalIdx;
2486
2487 for (unsigned E = Operands.size(); I != E; ++I) {
2488 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2489
2490 // Add the register arguments
2491 if (Op.isRegOrImm()) {
2492 Op.addRegOrImmOperands(Inst, 1);
2493 continue;
2494 } else if (Op.isImmModifier()) {
2495 OptionalIdx[Op.getImmTy()] = I;
2496 } else {
2497 assert(false);
2498 }
2499 }
2500
2501 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2502 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2503 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2504 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2505 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2506 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2507 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2508 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2509}
2510
Sam Kolton5f10a132016-05-06 11:31:17 +00002511AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002512 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
Sam Kolton5f10a132016-05-06 11:31:17 +00002513}
2514
2515AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002516 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
Sam Kolton5f10a132016-05-06 11:31:17 +00002517}
2518
2519AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002520 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
Sam Kolton5f10a132016-05-06 11:31:17 +00002521}
2522
2523AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002524 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
Sam Kolton5f10a132016-05-06 11:31:17 +00002525}
2526
2527AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002528 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
Sam Kolton5f10a132016-05-06 11:31:17 +00002529}
2530
Tom Stellard45bb48e2015-06-13 03:28:10 +00002531//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002532// smrd
2533//===----------------------------------------------------------------------===//
2534
2535bool AMDGPUOperand::isSMRDOffset() const {
2536
2537 // FIXME: Support 20-bit offsets on VI. We need to to pass subtarget
2538 // information here.
2539 return isImm() && isUInt<8>(getImm());
2540}
2541
2542bool AMDGPUOperand::isSMRDLiteralOffset() const {
2543 // 32-bit literals are only supported on CI and we only want to use them
2544 // when the offset is > 8-bits.
2545 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2546}
2547
Sam Kolton5f10a132016-05-06 11:31:17 +00002548AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002549 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00002550}
2551
2552AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002553 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00002554}
2555
Tom Stellard217361c2015-08-06 19:28:38 +00002556//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002557// vop3
2558//===----------------------------------------------------------------------===//
2559
// Convert an omod "mul:N" value to its hardware encoding in place.
// Only *1, *2 and *4 are encodable; the field holds log2 of the multiplier
// (1 -> 0, 2 -> 1, 4 -> 2). Returns false for any other value.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
2567
// Convert an omod "div:N" value to its hardware encoding in place.
// Only /1 (encoded 0, i.e. no modifier) and /2 (encoded 3) are legal;
// returns false for any other value.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2581
// Convert a DPP "bound_ctrl:N" value to its hardware encoding in place.
// bound_ctrl:0 encodes as field value 1; bound_ctrl:-1 as field value 0.
// Returns false for any other value.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2592
// Note: the order in this table matches the order of operands in AsmString.
// Each row is {asm name, immediate operand type, IsBit, ConvertResult}:
// IsBit marks bare flags parsed without a value (e.g. "glc" vs "offset:N");
// ConvertResult, when non-null, rewrites the parsed value into its encoding.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  // Buffer / DS / image modifiers.
  {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
  {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
  // DPP modifiers.
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  // SDWA modifiers.
  {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002620
Sam Kolton11de3702016-05-24 12:38:33 +00002621AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
2622 OperandMatchResultTy res;
2623 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2624 // try to parse any optional operand here
2625 if (Op.IsBit) {
2626 res = parseNamedBit(Op.Name, Operands, Op.Type);
2627 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2628 res = parseOModOperand(Operands);
Sam Kolton05ef1c92016-06-03 10:27:37 +00002629 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
2630 Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
2631 Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
2632 res = parseSDWASel(Operands, Op.Name, Op.Type);
Sam Kolton11de3702016-05-24 12:38:33 +00002633 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2634 res = parseSDWADstUnused(Operands);
2635 } else {
2636 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2637 }
2638 if (res != MatchOperand_NoMatch) {
2639 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002640 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002641 }
2642 return MatchOperand_NoMatch;
2643}
2644
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002645AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2646{
2647 StringRef Name = Parser.getTok().getString();
2648 if (Name == "mul") {
Sam Kolton11de3702016-05-24 12:38:33 +00002649 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002650 } else if (Name == "div") {
Sam Kolton11de3702016-05-24 12:38:33 +00002651 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002652 } else {
2653 return MatchOperand_NoMatch;
2654 }
2655}
2656
Tom Stellarda90b9522016-02-11 03:28:15 +00002657void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2658 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002659 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002660 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002661 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2662 }
2663 for (unsigned E = Operands.size(); I != E; ++I)
2664 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2665}
2666
2667void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002668 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2669 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002670 cvtVOP3(Inst, Operands);
2671 } else {
2672 cvtId(Inst, Operands);
2673 }
2674}
2675
// Convert parsed VOP3 operands into MCInst operands.
// Sources whose MCInst slot is an input-modifier operand are emitted as
// (modifiers, value) pairs; other immediates are named modifiers collected
// and appended afterwards (clamp, then omod) in encoding order.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Check the *next MCInst slot* to decide how this parsed operand maps.
    if (Desc.OpInfo[Inst.getNumOperands()].OperandType == AMDGPU::OPERAND_INPUT_MODS) {
      // Emits two MCInst operands: src modifiers + the source itself.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false); // Parser should not produce any other operand kind here.
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
}
2698
Sam Koltondfa29f72016-03-09 12:29:31 +00002699//===----------------------------------------------------------------------===//
2700// dpp
2701//===----------------------------------------------------------------------===//
2702
2703bool AMDGPUOperand::isDPPCtrl() const {
2704 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2705 if (result) {
2706 int64_t Imm = getImm();
2707 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2708 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2709 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2710 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2711 (Imm == 0x130) ||
2712 (Imm == 0x134) ||
2713 (Imm == 0x138) ||
2714 (Imm == 0x13c) ||
2715 (Imm == 0x140) ||
2716 (Imm == 0x141) ||
2717 (Imm == 0x142) ||
2718 (Imm == 0x143);
2719 }
2720 return false;
2721}
2722
// Parse a DPP control operand and fold it into its 9-bit dpp_ctrl encoding:
//   quad_perm:[a,b,c,d] -> 0x000-0x0ff (2 bits per lane selector)
//   row_shl/row_shr/row_ror:1-15 -> 0x101-0x12f
//   wave_shl/rol/shr/ror:1 -> 0x130/0x134/0x138/0x13c
//   row_mirror / row_half_mirror -> 0x140 / 0x141
//   row_bcast:15 / row_bcast:31 -> 0x142 / 0x143
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane selector occupies the low 2 bits.
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three selectors: 2 bits each, shifted into place.
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d — parse the value and map (prefix, value) to the encoding.
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        // Only broadcasts of rows 15 and 31 are encodable.
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2822
Sam Kolton5f10a132016-05-06 11:31:17 +00002823AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002824 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002825}
2826
Sam Kolton5f10a132016-05-06 11:31:17 +00002827AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002828 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002829}
2830
Sam Kolton5f10a132016-05-06 11:31:17 +00002831AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002832 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00002833}
2834
// Convert parsed DPP operands into MCInst operands: defs, then sources
// (with input modifiers where the MCInst slot requires them), the dpp_ctrl
// value, and finally the optional row_mask/bank_mask/bound_ctrl modifiers
// with their hardware defaults (0xf, 0xf, 0).
void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (Desc.OpInfo[Inst.getNumOperands()].OperandType == AMDGPU::OPERAND_INPUT_MODS) {
      // Emits two MCInst operands: src modifiers + the source itself.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002863
Sam Kolton3025e7f2016-04-26 13:33:56 +00002864//===----------------------------------------------------------------------===//
2865// sdwa
2866//===----------------------------------------------------------------------===//
2867
2868AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002869AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
2870 AMDGPUOperand::ImmTy Type) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00002871 SMLoc S = Parser.getTok().getLoc();
2872 StringRef Value;
2873 AMDGPUAsmParser::OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00002874
Sam Kolton05ef1c92016-06-03 10:27:37 +00002875 res = parseStringWithPrefix(Prefix, Value);
2876 if (res != MatchOperand_Success) {
2877 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00002878 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002879
Sam Kolton3025e7f2016-04-26 13:33:56 +00002880 int64_t Int;
2881 Int = StringSwitch<int64_t>(Value)
2882 .Case("BYTE_0", 0)
2883 .Case("BYTE_1", 1)
2884 .Case("BYTE_2", 2)
2885 .Case("BYTE_3", 3)
2886 .Case("WORD_0", 4)
2887 .Case("WORD_1", 5)
2888 .Case("DWORD", 6)
2889 .Default(0xffffffff);
2890 Parser.Lex(); // eat last token
2891
2892 if (Int == 0xffffffff) {
2893 return MatchOperand_ParseFail;
2894 }
2895
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002896 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00002897 return MatchOperand_Success;
2898}
2899
Matt Arsenault37fefd62016-06-10 02:18:02 +00002900AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00002901AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2902 SMLoc S = Parser.getTok().getLoc();
2903 StringRef Value;
2904 AMDGPUAsmParser::OperandMatchResultTy res;
2905
2906 res = parseStringWithPrefix("dst_unused", Value);
2907 if (res != MatchOperand_Success) {
2908 return res;
2909 }
2910
2911 int64_t Int;
2912 Int = StringSwitch<int64_t>(Value)
2913 .Case("UNUSED_PAD", 0)
2914 .Case("UNUSED_SEXT", 1)
2915 .Case("UNUSED_PRESERVE", 2)
2916 .Default(0xffffffff);
2917 Parser.Lex(); // eat last token
2918
2919 if (Int == 0xffffffff) {
2920 return MatchOperand_ParseFail;
2921 }
2922
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002923 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00002924 return MatchOperand_Success;
2925}
2926
// Thin wrappers selecting the basic encoding class for SDWA conversion.
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}

void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}

void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
}
2938
// Convert parsed SDWA operands into MCInst operands.
// BasicInstType (VOP1/VOP2/VOPC from SIInstrFlags) selects which set of
// dst_sel/dst_unused/src*_sel modifiers is appended, with their defaults
// (sel 6 = DWORD, dst_unused 2 = UNUSED_PRESERVE).
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (BasicInstType == SIInstrFlags::VOPC &&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC sdwa use "vcc" token as dst. Skip it.
      continue;
    } else if (Desc.OpInfo[Inst.getNumOperands()].OperandType == AMDGPU::OPERAND_INPUT_MODS) {
      // Emits two MCInst operands: src modifiers + the source itself.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // clamp is common to all SDWA encodings and defaults to 0.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() == AMDGPU::V_NOP_sdwa) {
    // V_NOP_sdwa has no optional sdwa arguments
    return;
  }
  switch (BasicInstType) {
  case SIInstrFlags::VOP1: {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
    break;
  }
  case SIInstrFlags::VOP2: {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
    break;
  }
  case SIInstrFlags::VOPC: {
    // VOPC writes vcc implicitly, so there is no dst_sel/dst_unused.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
    break;
  }
  default:
    llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
  }
}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002996
Tom Stellard45bb48e2015-06-13 03:28:10 +00002997/// Force static initialization.
2998extern "C" void LLVMInitializeAMDGPUAsmParser() {
2999 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
3000 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
3001}
3002
3003#define GET_REGISTER_MATCHER
3004#define GET_MATCHER_IMPLEMENTATION
3005#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00003006
3007
3008// This function should be defined after auto-generated include so that we have
3009// MatchClassKind enum defined
3010unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
3011 unsigned Kind) {
3012 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00003013 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00003014 // operand. This method checks if we are given immediate operand but expect to
3015 // get corresponding token.
3016 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
3017 switch (Kind) {
3018 case MCK_addr64:
3019 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
3020 case MCK_gds:
3021 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
3022 case MCK_glc:
3023 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
3024 case MCK_idxen:
3025 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
3026 case MCK_offen:
3027 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003028 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00003029 // When operands have expression values, they will return true for isToken,
3030 // because it is not possible to distinguish between a token and an
3031 // expression at parse time. MatchInstructionImpl() will always try to
3032 // match an operand as a token, when isToken returns true, and when the
3033 // name of the expression is not a valid token, the match will fail,
3034 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003035 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
3036 case MCK_SSrcF32:
3037 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00003038 case MCK_SoppBrTarget:
3039 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00003040 default: return Match_InvalidOperand;
3041 }
3042}