//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;

namespace {

struct OptionalOperand;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  typedef std::unique_ptr<AMDGPUOperand> Ptr;

  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClampSI,
    ImmTyOModSI,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTySdwaSel,
    ImmTySdwaDstUnused,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
    ImmTyHwreg,
    ImmTySendMsg,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
    if (Imm.Type == ImmTyNone && ApplyModifiers && Imm.Modifiers != 0) {
      // Apply modifiers to immediate value.
      int64_t Val = Imm.Val;
      bool Negate = Imm.Modifiers & 0x1;
      bool Abs = Imm.Modifiers & 0x2;
      if (Imm.IsFPImm) {
        APFloat F(BitsToFloat(Val));
        if (Abs) {
          F.clearSign();
        }
        if (Negate) {
          F.changeSign();
        }
        Val = F.bitcastToAPInt().getZExtValue();
      } else {
        Val = Abs ? std::abs(Val) : Val;
        Val = Negate ? -Val : Val;
      }
      Inst.addOperand(MCOperand::createImm(Val));
    } else {
      Inst.addOperand(MCOperand::createImm(getImm()));
    }
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N, false);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    // Only plain immediates are inlinable (e.g. the "clamp" attribute is not).
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone)
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values, which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

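  // Illustrative examples of what the check above accepts (assumed values,
  // not exhaustive): integers in [-16, 64] and the floats 0.0, +/-0.5,
  // +/-1.0, +/-2.0 and +/-4.0 count as inline constants, so an operand like
  // "1.0" in "v_add_f32 v0, 1.0, v1" needs no literal, whereas "1.5" would
  // have to be emitted as a 32-bit literal instead.
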
  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isClampSI() const {
    return isImmTy(ImmTyClampSI);
  }

  bool isOModSI() const {
    return isImmTy(ImmTyOModSI);
  }

  bool isImmModifier() const {
    return Kind == Immediate && Imm.Type != ImmTyNone;
  }

  bool isDMask() const {
    return isImmTy(ImmTyDMask);
  }

  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }

  bool isBankMask() const {
    return isImmTy(ImmTyDppBankMask);
  }

  bool isRowMask() const {
    return isImmTy(ImmTyDppRowMask);
  }

  bool isBoundCtrl() const {
    return isImmTy(ImmTyDppBoundCtrl);
  }

  bool isSDWASel() const {
    return isImmTy(ImmTySdwaSel);
  }

  bool isSDWADstUnused() const {
    return isImmTy(ImmTySdwaDstUnused);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
  }

  bool isSCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
  }

  bool isVCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of the instruction type (unsigned/signed, floating or "untyped"/B64),
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void printImmTy(raw_ostream& OS, ImmTy Type) const {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaSel: OS << "SdwaSel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128: OS << "R128"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    }
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
                                      enum ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
                                      SMLoc E,
                                      const MCRegisterInfo *TRI,
                                      const MCSubtargetInfo *STI,
                                      bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                                          bool (*ConvertResult)(int64_t&) = 0);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);

  OperandMatchResultTy parseImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithInputMods(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultHwreg() const;

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  AMDGPUOperand::Ptr defaultMubufOffset() const;
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  AMDGPUOperand::Ptr defaultClampSI() const;
  AMDGPUOperand::Ptr defaultOModSI() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultSDWASel() const;
  AMDGPUOperand::Ptr defaultSDWADstUnused() const;
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};

}

static int getRegClass(RegisterKind Is, unsigned RegWidth) {
  if (Is == IS_VGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  } else if (Is == IS_TTMP) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::TTMP_32RegClassID;
      case 2: return AMDGPU::TTMP_64RegClassID;
      case 4: return AMDGPU::TTMP_128RegClassID;
    }
  } else if (Is == IS_SGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::SGPR_32RegClassID;
      case 2: return AMDGPU::SGPR_64RegClassID;
      case 4: return AMDGPU::SGPR_128RegClassID;
      case 8: return AMDGPU::SReg_256RegClassID;
      case 16: return AMDGPU::SReg_512RegClassID;
    }
  }
  return -1;
}

static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Default(0);
}

bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  auto R = parseRegister();
  if (!R) return true;
  assert(R->isReg());
  RegNo = R->getReg();
  StartLoc = R->getStartLoc();
  EndLoc = R->getEndLoc();
  return false;
}
723
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000724bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
725{
726 switch (RegKind) {
727 case IS_SPECIAL:
728 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
729 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
730 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
731 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
732 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
733 return false;
734 case IS_VGPR:
735 case IS_SGPR:
736 case IS_TTMP:
737 if (Reg1 != Reg + RegWidth) { return false; }
738 RegWidth++;
739 return true;
740 default:
741 assert(false); return false;
742 }
743}

bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

          if (getLexer().isNot(AsmToken::RBrac)) { return false; }
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0) { return false; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}
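
// Illustrative register syntax handled above (assumed from the parsing logic,
// not an exhaustive list):
//   v0, s7, ttmp3            single 32-bit registers
//   s[2:3], v[4:7]           register ranges (width = hi - lo + 1)
//   v[5]                     single-element range, the ":hi" part is optional
//   [s0, s1, s2, s3]         list of consecutive 32-bit registers
//   vcc, exec, flat_scratch  special registers, combined from lo/hi halves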

std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    return nullptr;
  }
  return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
                                  TRI, &getSTI(), false);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double-precision floating-point
    // value is used. I'm not sure of the best way to detect this.
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}
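
// Illustrative immediate forms accepted above (assumed examples):
//   123, -456, 0x1f   integers that fit in 32 bits (signed or unsigned)
//   0.5, -2.0         floating-point literals, stored as a 32-bit
//                     single-precision bit pattern (IsFPImm = true)
// Note that once a leading '-' has been consumed, anything other than an
// integer or real literal is reported as a parse failure rather than a
// no-match, since the sign token cannot be put back.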

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
  auto res = parseImm(Operands);
  if (res != MatchOperand_NoMatch) {
    return res;
  }

  if (auto R = parseRegister()) {
    assert(R->isReg());
    R->Reg.IsForcedVOP3 = isForcedVOP3();
    Operands.push_back(std::move(R));
    return MatchOperand_Success;
  }
  return MatchOperand_ParseFail;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  unsigned Modifiers = 0;
  if (Negate) {
    Modifiers |= 0x1;
  }
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2;
  }

  if (Modifiers) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Modifiers);
  }
  return MatchOperand_Success;
}
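
// Illustrative source-modifier spellings handled above and the modifier bits
// they produce (assumed examples; 0x1 = negate, 0x2 = absolute value):
//   v0        no modifiers              -> Modifiers == 0
//   -v0       negation                  -> Modifiers == 0x1
//   |v0|      absolute value            -> Modifiers == 0x2
//   abs(v0)   alternate abs spelling    -> Modifiers == 0x2
//   -|v0|     negated absolute value    -> Modifiers == 0x3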

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {

  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
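
// Illustrative directive usage accepted by the two parsers above (the
// specific version numbers are assumed for the example, not taken from
// this file):
//   .hsa_code_object_version 1,0
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
//   .hsa_code_object_isa              ; no arguments: use the target's ISA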

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
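
// Illustrative layout of the directive handled above; the field name shown is
// an assumption for the example, and any amd_kernel_code_t field accepted by
// parseAmdKernelCodeField works the same way (one "name = value" per line):
//   .amd_kernel_code_t
//     kernel_code_entry_byte_offset = 256
//   .end_amd_kernel_code_t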

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by a custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    const auto &Tok = Parser.getTok();
    Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic.
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  if (Name.endswith("_e64")) { Name = Name.substr(0, Name.size() - 4); }
  if (Name.endswith("_e32")) { Name = Name.substr(0, Name.size() - 4); }

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef Name = Parser.getTok().getString();
    if (!Name.equals(Prefix)) {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy,
                                    bool (*ConvertResult)(int64_t&)) {

  SMLoc S = Parser.getTok().getLoc();
  int64_t Value = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
  if (Res != MatchOperand_Success)
    return Res;

  if (ConvertResult && !ConvertResult(Value)) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
  return MatchOperand_Success;
}
1421
1422AMDGPUAsmParser::OperandMatchResultTy
1423AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001424 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001425 int64_t Bit = 0;
1426 SMLoc S = Parser.getTok().getLoc();
1427
1428  // If we are at the end of the statement, this named bit was omitted, so
1429  // use the default value.
1430 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1431 switch(getLexer().getKind()) {
1432 case AsmToken::Identifier: {
1433 StringRef Tok = Parser.getTok().getString();
1434 if (Tok == Name) {
1435 Bit = 1;
1436 Parser.Lex();
1437 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1438 Bit = 0;
1439 Parser.Lex();
1440 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001441 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001442 }
1443 break;
1444 }
1445 default:
1446 return MatchOperand_NoMatch;
1447 }
1448 }
1449
1450 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1451 return MatchOperand_Success;
1452}
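// Usage sketch: parseNamedBit("gds", ...) yields an immediate of 1 for the
// token "gds", 0 for the negated spelling "nogds", and 0 by default when the
// operand is simply omitted at the end of the statement.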
1453
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001454typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1455
Sam Koltona74cd522016-03-18 15:35:51 +00001456void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1457 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001458 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001459 auto i = OptionalIdx.find(ImmT);
1460 if (i != OptionalIdx.end()) {
1461 unsigned Idx = i->second;
1462 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1463 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001464 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001465 }
1466}
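// Usage note: the converters below record the Operands index of each optional
// immediate in OptionalIdx, keyed by ImmTy; addOptionalImmOperand then emits
// either the parsed value or the supplied Default (0 unless overridden, e.g.
// 0xf for the DPP row/bank masks in cvtDPP).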
1467
Sam Kolton3025e7f2016-04-26 13:33:56 +00001468AMDGPUAsmParser::OperandMatchResultTy
1469AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1470 if (getLexer().isNot(AsmToken::Identifier)) {
1471 return MatchOperand_NoMatch;
1472 }
1473 StringRef Tok = Parser.getTok().getString();
1474 if (Tok != Prefix) {
1475 return MatchOperand_NoMatch;
1476 }
1477
1478 Parser.Lex();
1479 if (getLexer().isNot(AsmToken::Colon)) {
1480 return MatchOperand_ParseFail;
1481 }
1482
1483 Parser.Lex();
1484 if (getLexer().isNot(AsmToken::Identifier)) {
1485 return MatchOperand_ParseFail;
1486 }
1487
1488 Value = Parser.getTok().getString();
1489 return MatchOperand_Success;
1490}
1491
Tom Stellard45bb48e2015-06-13 03:28:10 +00001492//===----------------------------------------------------------------------===//
1493// ds
1494//===----------------------------------------------------------------------===//
1495
Tom Stellard45bb48e2015-06-13 03:28:10 +00001496void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1497 const OperandVector &Operands) {
1498
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001499 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001500
1501 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1502 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1503
1504 // Add the register arguments
1505 if (Op.isReg()) {
1506 Op.addRegOperands(Inst, 1);
1507 continue;
1508 }
1509
1510 // Handle optional arguments
1511 OptionalIdx[Op.getImmTy()] = i;
1512 }
1513
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001514 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1515 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001516 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001517
Tom Stellard45bb48e2015-06-13 03:28:10 +00001518 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1519}
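// Illustrative example: cvtDSOffset01 serves the two-offset DS forms, e.g.
// something like
//   ds_write2_b32 v2, v0, v1 offset0:4 offset1:8
// where any offset0/offset1/gds operand that was not written is filled with
// the default immediate 0 before the implicit m0 operand is appended.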
1520
1521void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1522
1523 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1524 bool GDSOnly = false;
1525
1526 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1527 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1528
1529 // Add the register arguments
1530 if (Op.isReg()) {
1531 Op.addRegOperands(Inst, 1);
1532 continue;
1533 }
1534
1535 if (Op.isToken() && Op.getToken() == "gds") {
1536 GDSOnly = true;
1537 continue;
1538 }
1539
1540 // Handle optional arguments
1541 OptionalIdx[Op.getImmTy()] = i;
1542 }
1543
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001544  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001546
1547 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001548 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001549 }
1550 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1551}
1552
1553
1554//===----------------------------------------------------------------------===//
1555// s_waitcnt
1556//===----------------------------------------------------------------------===//
1557
1558bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1559 StringRef CntName = Parser.getTok().getString();
1560 int64_t CntVal;
1561
1562 Parser.Lex();
1563 if (getLexer().isNot(AsmToken::LParen))
1564 return true;
1565
1566 Parser.Lex();
1567 if (getLexer().isNot(AsmToken::Integer))
1568 return true;
1569
1570 if (getParser().parseAbsoluteExpression(CntVal))
1571 return true;
1572
1573 if (getLexer().isNot(AsmToken::RParen))
1574 return true;
1575
1576 Parser.Lex();
1577 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1578 Parser.Lex();
1579
1580 int CntShift;
1581 int CntMask;
1582
1583 if (CntName == "vmcnt") {
1584 CntMask = 0xf;
1585 CntShift = 0;
1586 } else if (CntName == "expcnt") {
1587 CntMask = 0x7;
1588 CntShift = 4;
1589 } else if (CntName == "lgkmcnt") {
Tom Stellard3d2c8522016-01-28 17:13:44 +00001590 CntMask = 0xf;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001591 CntShift = 8;
1592 } else {
1593 return true;
1594 }
1595
1596 IntVal &= ~(CntMask << CntShift);
1597 IntVal |= (CntVal << CntShift);
1598 return false;
1599}
1600
1601AMDGPUAsmParser::OperandMatchResultTy
1602AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1603 // Disable all counters by default.
1604 // vmcnt [3:0]
1605 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00001606 // lgkmcnt [11:8]
1607 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001608 SMLoc S = Parser.getTok().getLoc();
1609
1610 switch(getLexer().getKind()) {
1611 default: return MatchOperand_ParseFail;
1612 case AsmToken::Integer:
1613 // The operand can be an integer value.
1614 if (getParser().parseAbsoluteExpression(CntVal))
1615 return MatchOperand_ParseFail;
1616 break;
1617
1618 case AsmToken::Identifier:
1619 do {
1620 if (parseCnt(CntVal))
1621 return MatchOperand_ParseFail;
1622 } while(getLexer().isNot(AsmToken::EndOfStatement));
1623 break;
1624 }
1625 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1626 return MatchOperand_Success;
1627}
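// A minimal standalone sketch (not used by the parser) of the s_waitcnt
// packing that parseCnt() performs field by field, using the layout from the
// comments above: vmcnt [3:0], expcnt [6:4], lgkmcnt [11:8]. The helper name
// is ours, not an LLVM API.
static inline int64_t encodeWaitcnt(int64_t VmCnt, int64_t ExpCnt,
                                    int64_t LgkmCnt) {
  return (VmCnt & 0xf) | ((ExpCnt & 0x7) << 4) | ((LgkmCnt & 0xf) << 8);
}
// For example, "s_waitcnt vmcnt(0) & lgkmcnt(0)" ends up as
// encodeWaitcnt(0, 0x7, 0) == 0x70, and the all-counters-disabled default
// above is 0xf7f == encodeWaitcnt(0xf, 0x7, 0xf).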
1628
Artem Tamazov6edc1352016-05-26 17:00:33 +00001629bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
1630 using namespace llvm::AMDGPU::Hwreg;
1631
Artem Tamazovd6468662016-04-25 14:13:51 +00001632 if (Parser.getTok().getString() != "hwreg")
1633 return true;
1634 Parser.Lex();
1635
1636 if (getLexer().isNot(AsmToken::LParen))
1637 return true;
1638 Parser.Lex();
1639
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001640 if (getLexer().is(AsmToken::Identifier)) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001641 HwReg.IsSymbolic = true;
1642 HwReg.Id = ID_UNKNOWN_;
1643 const StringRef tok = Parser.getTok().getString();
1644 for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
1645 if (tok == IdSymbolic[i]) {
1646 HwReg.Id = i;
1647 break;
1648 }
1649 }
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001650 Parser.Lex();
1651 } else {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001652 HwReg.IsSymbolic = false;
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001653 if (getLexer().isNot(AsmToken::Integer))
1654 return true;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001655 if (getParser().parseAbsoluteExpression(HwReg.Id))
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001656 return true;
1657 }
Artem Tamazovd6468662016-04-25 14:13:51 +00001658
1659 if (getLexer().is(AsmToken::RParen)) {
1660 Parser.Lex();
1661 return false;
1662 }
1663
1664 // optional params
1665 if (getLexer().isNot(AsmToken::Comma))
1666 return true;
1667 Parser.Lex();
1668
1669 if (getLexer().isNot(AsmToken::Integer))
1670 return true;
1671 if (getParser().parseAbsoluteExpression(Offset))
1672 return true;
1673
1674 if (getLexer().isNot(AsmToken::Comma))
1675 return true;
1676 Parser.Lex();
1677
1678 if (getLexer().isNot(AsmToken::Integer))
1679 return true;
1680 if (getParser().parseAbsoluteExpression(Width))
1681 return true;
1682
1683 if (getLexer().isNot(AsmToken::RParen))
1684 return true;
1685 Parser.Lex();
1686
1687 return false;
1688}
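// Syntax sketch (the symbolic register names come from the Hwreg IdSymbolic
// table in AMDGPUAsmUtils, so treat the spelling below as an example, not a
// guarantee):
//   s_getreg_b32 s0, hwreg(HW_REG_HW_ID)    // id only, offset/width defaulted
//   s_getreg_b32 s0, hwreg(3, 0, 32)        // numeric id, bit offset, width
// parseHwregConstruct only fills HwReg/Offset/Width; range checking and
// encoding happen in parseHwreg below.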
1689
1690AMDGPUAsmParser::OperandMatchResultTy
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001691AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001692 using namespace llvm::AMDGPU::Hwreg;
1693
Artem Tamazovd6468662016-04-25 14:13:51 +00001694 int64_t Imm16Val = 0;
1695 SMLoc S = Parser.getTok().getLoc();
1696
1697 switch(getLexer().getKind()) {
Sam Kolton11de3702016-05-24 12:38:33 +00001698 default: return MatchOperand_NoMatch;
Artem Tamazovd6468662016-04-25 14:13:51 +00001699 case AsmToken::Integer:
1700 // The operand can be an integer value.
1701 if (getParser().parseAbsoluteExpression(Imm16Val))
Artem Tamazov6edc1352016-05-26 17:00:33 +00001702 return MatchOperand_NoMatch;
1703 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovd6468662016-04-25 14:13:51 +00001704 Error(S, "invalid immediate: only 16-bit values are legal");
1705      // Do not return an error code, but create an imm operand anyway and proceed
1706      // to the next operand, if any. That avoids unnecessary error messages.
1707 }
1708 break;
1709
1710 case AsmToken::Identifier: {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001711 OperandInfoTy HwReg(ID_UNKNOWN_);
1712 int64_t Offset = OFFSET_DEFAULT_;
1713 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
1714 if (parseHwregConstruct(HwReg, Offset, Width))
Artem Tamazovd6468662016-04-25 14:13:51 +00001715 return MatchOperand_ParseFail;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001716 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
1717 if (HwReg.IsSymbolic)
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001718 Error(S, "invalid symbolic name of hardware register");
1719 else
1720 Error(S, "invalid code of hardware register: only 6-bit values are legal");
Reid Kleckner7f0ae152016-04-27 16:46:33 +00001721 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00001722 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
Artem Tamazovd6468662016-04-25 14:13:51 +00001723 Error(S, "invalid bit offset: only 5-bit values are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00001724 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
Artem Tamazovd6468662016-04-25 14:13:51 +00001725 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00001726 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
Artem Tamazovd6468662016-04-25 14:13:51 +00001727 }
1728 break;
1729 }
1730 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
1731 return MatchOperand_Success;
1732}
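// Encoding sketch for the immediate built above, assuming the usual SIMM16
// hwreg layout (ID_SHIFT_ == 0, OFFSET_SHIFT_ == 6, WIDTH_M1_SHIFT_ == 11;
// the authoritative constants live in llvm::AMDGPU::Hwreg):
//   hwreg(4, 0, 32)  ->  4 | (0 << 6) | (31 << 11)  ==  0xF804
// i.e. a 6-bit register id, a 5-bit bit offset and a 5-bit (width - 1) field,
// matching the "only N-bit values are legal" diagnostics above.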
1733
Tom Stellard45bb48e2015-06-13 03:28:10 +00001734bool AMDGPUOperand::isSWaitCnt() const {
1735 return isImm();
1736}
1737
Artem Tamazovd6468662016-04-25 14:13:51 +00001738bool AMDGPUOperand::isHwreg() const {
1739 return isImmTy(ImmTyHwreg);
1740}
1741
Sam Kolton5f10a132016-05-06 11:31:17 +00001742AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
1743 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
1744}
1745
Artem Tamazov6edc1352016-05-26 17:00:33 +00001746bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001747 using namespace llvm::AMDGPU::SendMsg;
1748
1749 if (Parser.getTok().getString() != "sendmsg")
1750 return true;
1751 Parser.Lex();
1752
1753 if (getLexer().isNot(AsmToken::LParen))
1754 return true;
1755 Parser.Lex();
1756
1757 if (getLexer().is(AsmToken::Identifier)) {
1758 Msg.IsSymbolic = true;
1759 Msg.Id = ID_UNKNOWN_;
1760 const std::string tok = Parser.getTok().getString();
1761 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
1762 switch(i) {
1763 default: continue; // Omit gaps.
1764 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
1765 }
1766 if (tok == IdSymbolic[i]) {
1767 Msg.Id = i;
1768 break;
1769 }
1770 }
1771 Parser.Lex();
1772 } else {
1773 Msg.IsSymbolic = false;
1774 if (getLexer().isNot(AsmToken::Integer))
1775 return true;
1776 if (getParser().parseAbsoluteExpression(Msg.Id))
1777 return true;
1778 if (getLexer().is(AsmToken::Integer))
1779 if (getParser().parseAbsoluteExpression(Msg.Id))
1780 Msg.Id = ID_UNKNOWN_;
1781 }
1782 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
1783 return false;
1784
1785 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
1786 if (getLexer().isNot(AsmToken::RParen))
1787 return true;
1788 Parser.Lex();
1789 return false;
1790 }
1791
1792 if (getLexer().isNot(AsmToken::Comma))
1793 return true;
1794 Parser.Lex();
1795
1796 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
1797 Operation.Id = ID_UNKNOWN_;
1798 if (getLexer().is(AsmToken::Identifier)) {
1799 Operation.IsSymbolic = true;
1800 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1801 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1802 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001803 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001804 for (int i = F; i < L; ++i) {
1805 if (Tok == S[i]) {
1806 Operation.Id = i;
1807 break;
1808 }
1809 }
1810 Parser.Lex();
1811 } else {
1812 Operation.IsSymbolic = false;
1813 if (getLexer().isNot(AsmToken::Integer))
1814 return true;
1815 if (getParser().parseAbsoluteExpression(Operation.Id))
1816 return true;
1817 }
1818
1819 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1820 // Stream id is optional.
1821 if (getLexer().is(AsmToken::RParen)) {
1822 Parser.Lex();
1823 return false;
1824 }
1825
1826 if (getLexer().isNot(AsmToken::Comma))
1827 return true;
1828 Parser.Lex();
1829
1830 if (getLexer().isNot(AsmToken::Integer))
1831 return true;
1832 if (getParser().parseAbsoluteExpression(StreamId))
1833 return true;
1834 }
1835
1836 if (getLexer().isNot(AsmToken::RParen))
1837 return true;
1838 Parser.Lex();
1839 return false;
1840}
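// Syntax sketch (the symbolic names come from the SendMsg IdSymbolic and
// Op*Symbolic tables, so the spellings below are examples rather than a
// contract):
//   s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)   // message, GS op, stream id
//   s_sendmsg sendmsg(MSG_INTERRUPT)           // no operation or stream needed
//   s_sendmsg sendmsg(2, 1)                    // fully numeric form
// parseSendMsgConstruct only tokenizes the construct; validation and the final
// immediate are produced in parseSendMsgOp below.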
1841
1842AMDGPUAsmParser::OperandMatchResultTy
1843AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
1844 using namespace llvm::AMDGPU::SendMsg;
1845
1846 int64_t Imm16Val = 0;
1847 SMLoc S = Parser.getTok().getLoc();
1848
1849 switch(getLexer().getKind()) {
1850 default:
1851 return MatchOperand_NoMatch;
1852 case AsmToken::Integer:
1853 // The operand can be an integer value.
1854 if (getParser().parseAbsoluteExpression(Imm16Val))
1855 return MatchOperand_NoMatch;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001856 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001857 Error(S, "invalid immediate: only 16-bit values are legal");
1858      // Do not return an error code, but create an imm operand anyway and proceed
1859      // to the next operand, if any. That avoids unnecessary error messages.
1860 }
1861 break;
1862 case AsmToken::Identifier: {
1863 OperandInfoTy Msg(ID_UNKNOWN_);
1864 OperandInfoTy Operation(OP_UNKNOWN_);
Artem Tamazov6edc1352016-05-26 17:00:33 +00001865 int64_t StreamId = STREAM_ID_DEFAULT_;
1866 if (parseSendMsgConstruct(Msg, Operation, StreamId))
1867 return MatchOperand_ParseFail;
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001868 do {
1869 // Validate and encode message ID.
1870 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
1871 || Msg.Id == ID_SYSMSG)) {
1872 if (Msg.IsSymbolic)
1873 Error(S, "invalid/unsupported symbolic name of message");
1874 else
1875 Error(S, "invalid/unsupported code of message");
1876 break;
1877 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00001878 Imm16Val = (Msg.Id << ID_SHIFT_);
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001879 // Validate and encode operation ID.
1880 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
1881 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
1882 if (Operation.IsSymbolic)
1883 Error(S, "invalid symbolic name of GS_OP");
1884 else
1885 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
1886 break;
1887 }
1888 if (Operation.Id == OP_GS_NOP
1889 && Msg.Id != ID_GS_DONE) {
1890 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
1891 break;
1892 }
1893 Imm16Val |= (Operation.Id << OP_SHIFT_);
1894 }
1895 if (Msg.Id == ID_SYSMSG) {
1896 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
1897 if (Operation.IsSymbolic)
1898 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
1899 else
1900 Error(S, "invalid/unsupported code of SYSMSG_OP");
1901 break;
1902 }
1903 Imm16Val |= (Operation.Id << OP_SHIFT_);
1904 }
1905 // Validate and encode stream ID.
1906 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1907 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
1908 Error(S, "invalid stream id: only 2-bit values are legal");
1909 break;
1910 }
1911 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
1912 }
1913 } while (0);
1914 }
1915 break;
1916 }
1917 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
1918 return MatchOperand_Success;
1919}
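// Encoding sketch, assuming the usual SIMM16 sendmsg layout (ID_SHIFT_ == 0,
// OP_SHIFT_ == 4, STREAM_ID_SHIFT_ == 8) and the ISA's MSG_GS == 2 and
// GS_OP_EMIT == 2 (see llvm::AMDGPU::SendMsg for the authoritative values):
//   sendmsg(MSG_GS, GS_OP_EMIT, 1)  ->  2 | (2 << 4) | (1 << 8)  ==  0x122
// with the message id in the low bits, the operation above it and the 2-bit
// stream id above that, exactly as the three "Validate and encode" steps
// assemble Imm16Val.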
1920
1921bool AMDGPUOperand::isSendMsg() const {
1922 return isImmTy(ImmTySendMsg);
1923}
1924
Tom Stellard45bb48e2015-06-13 03:28:10 +00001925//===----------------------------------------------------------------------===//
1926// sopp branch targets
1927//===----------------------------------------------------------------------===//
1928
1929AMDGPUAsmParser::OperandMatchResultTy
1930AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1931 SMLoc S = Parser.getTok().getLoc();
1932
1933 switch (getLexer().getKind()) {
1934 default: return MatchOperand_ParseFail;
1935 case AsmToken::Integer: {
1936 int64_t Imm;
1937 if (getParser().parseAbsoluteExpression(Imm))
1938 return MatchOperand_ParseFail;
1939 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1940 return MatchOperand_Success;
1941 }
1942
1943 case AsmToken::Identifier:
1944 Operands.push_back(AMDGPUOperand::CreateExpr(
1945 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
1946 Parser.getTok().getString()), getContext()), S));
1947 Parser.Lex();
1948 return MatchOperand_Success;
1949 }
1950}
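// Example: "s_cbranch_scc0 loop_begin" produces an MCExpr operand for the
// label, while a literal target such as "s_branch 8" goes through
// parseAbsoluteExpression and becomes a plain immediate operand.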
1951
1952//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00001953// mubuf
1954//===----------------------------------------------------------------------===//
1955
Tom Stellard45bb48e2015-06-13 03:28:10 +00001956bool AMDGPUOperand::isMubufOffset() const {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001957 return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
Tom Stellard45bb48e2015-06-13 03:28:10 +00001958}
1959
Sam Kolton5f10a132016-05-06 11:31:17 +00001960AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
1961 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
1962}
1963
1964AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
1965 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
1966}
1967
1968AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
1969 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
1970}
1971
1972AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
1973 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
1974}
1975
Artem Tamazov8ce1f712016-05-19 12:22:39 +00001976void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
1977 const OperandVector &Operands,
1978 bool IsAtomic, bool IsAtomicReturn) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001979 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00001980 assert(IsAtomicReturn ? IsAtomic : true);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001981
1982 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1983 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1984
1985 // Add the register arguments
1986 if (Op.isReg()) {
1987 Op.addRegOperands(Inst, 1);
1988 continue;
1989 }
1990
1991 // Handle the case where soffset is an immediate
1992 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
1993 Op.addImmOperands(Inst, 1);
1994 continue;
1995 }
1996
1997 // Handle tokens like 'offen' which are sometimes hard-coded into the
1998 // asm string. There are no MCInst operands for these.
1999 if (Op.isToken()) {
2000 continue;
2001 }
2002 assert(Op.isImm());
2003
2004 // Handle optional arguments
2005 OptionalIdx[Op.getImmTy()] = i;
2006 }
2007
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002008 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
2009 if (IsAtomicReturn) {
2010 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
2011 Inst.insert(I, *I);
2012 }
2013
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002014 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002015 if (!IsAtomic) { // glc is hard-coded.
2016 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2017 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002018 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2019 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002020}
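// Illustrative note: for an atomic-with-return form (e.g. a buffer_atomic_add
// written with glc), the same VGPR both supplies the data and receives the
// returned value, so the converter re-inserts the leading $vdata_in operand as
// $vdata; glc is skipped in the optional list because atomics hard-code it in
// the asm string.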
2021
2022//===----------------------------------------------------------------------===//
2023// mimg
2024//===----------------------------------------------------------------------===//
2025
Sam Kolton1bdcef72016-05-23 09:59:02 +00002026void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2027 unsigned I = 1;
2028 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2029 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2030 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2031 }
2032
2033 OptionalImmIndexMap OptionalIdx;
2034
2035 for (unsigned E = Operands.size(); I != E; ++I) {
2036 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2037
2038 // Add the register arguments
2039 if (Op.isRegOrImm()) {
2040 Op.addRegOrImmOperands(Inst, 1);
2041 continue;
2042 } else if (Op.isImmModifier()) {
2043 OptionalIdx[Op.getImmTy()] = I;
2044 } else {
2045 assert(false);
2046 }
2047 }
2048
2049 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2050 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2051 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2052 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2053 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2054 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2055 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2056 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2057}
2058
2059void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2060 unsigned I = 1;
2061 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2062 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2063 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2064 }
2065
2066 // Add src, same as dst
2067 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2068
2069 OptionalImmIndexMap OptionalIdx;
2070
2071 for (unsigned E = Operands.size(); I != E; ++I) {
2072 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2073
2074 // Add the register arguments
2075 if (Op.isRegOrImm()) {
2076 Op.addRegOrImmOperands(Inst, 1);
2077 continue;
2078 } else if (Op.isImmModifier()) {
2079 OptionalIdx[Op.getImmTy()] = I;
2080 } else {
2081 assert(false);
2082 }
2083 }
2084
2085 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2086 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2087 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2088 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2089 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2090 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2091 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2092 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2093}
2094
Sam Kolton5f10a132016-05-06 11:31:17 +00002095AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
2096 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
2097}
2098
2099AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
2100 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
2101}
2102
2103AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
2104 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
2105}
2106
2107AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
2108 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
2109}
2110
2111AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
2112 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
2113}
2114
Tom Stellard45bb48e2015-06-13 03:28:10 +00002115//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002116// smrd
2117//===----------------------------------------------------------------------===//
2118
2119bool AMDGPUOperand::isSMRDOffset() const {
2120
2121  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
2122 // information here.
2123 return isImm() && isUInt<8>(getImm());
2124}
2125
2126bool AMDGPUOperand::isSMRDLiteralOffset() const {
2127 // 32-bit literals are only supported on CI and we only want to use them
2128 // when the offset is > 8-bits.
2129 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2130}
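// Example: an offset like 0x10 satisfies isSMRDOffset (unsigned 8-bit), while
// something like 0x1234 only satisfies isSMRDLiteralOffset and is emitted as a
// 32-bit literal, which per the comment above is a CI-only form.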
2131
Sam Kolton5f10a132016-05-06 11:31:17 +00002132AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
2133 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2134}
2135
2136AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
2137 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2138}
2139
Tom Stellard217361c2015-08-06 19:28:38 +00002140//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002141// vop3
2142//===----------------------------------------------------------------------===//
2143
2144static bool ConvertOmodMul(int64_t &Mul) {
2145 if (Mul != 1 && Mul != 2 && Mul != 4)
2146 return false;
2147
2148 Mul >>= 1;
2149 return true;
2150}
2151
2152static bool ConvertOmodDiv(int64_t &Div) {
2153 if (Div == 1) {
2154 Div = 0;
2155 return true;
2156 }
2157
2158 if (Div == 2) {
2159 Div = 3;
2160 return true;
2161 }
2162
2163 return false;
2164}
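// Worked examples of the omod conversions above (the stored value is the
// hardware output-modifier field):
//   mul:2 -> ConvertOmodMul: 2 >> 1 == 1     mul:4 -> 4 >> 1 == 2
//   div:2 -> ConvertOmodDiv: 3               mul:1 / div:1 -> 0 (no modifier)
// Any other literal is rejected, making the operand fail to parse.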
2165
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002166static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
2167 if (BoundCtrl == 0) {
2168 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002169 return true;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002170 } else if (BoundCtrl == -1) {
2171 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002172 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002173 }
2174 return false;
2175}
2176
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002177// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00002178static const OptionalOperand AMDGPUOptionalOperandTable[] = {
2179 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
2180 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
2181 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
2182 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
2183 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
2184 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
2185 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
2186 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
2187 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
2188 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
2189 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
2190 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
2191 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
2192 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
2193 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
2194 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
2195 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
2196 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
2197 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
2198 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
2199 {"sdwa_sel", AMDGPUOperand::ImmTySdwaSel, false, nullptr},
2200 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002201};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002202
Sam Kolton11de3702016-05-24 12:38:33 +00002203AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
2204 OperandMatchResultTy res;
2205 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2206 // try to parse any optional operand here
2207 if (Op.IsBit) {
2208 res = parseNamedBit(Op.Name, Operands, Op.Type);
2209 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2210 res = parseOModOperand(Operands);
2211 } else if (Op.Type == AMDGPUOperand::ImmTySdwaSel) {
2212 res = parseSDWASel(Operands);
2213 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2214 res = parseSDWADstUnused(Operands);
2215 } else {
2216 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2217 }
2218 if (res != MatchOperand_NoMatch) {
2219 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002220 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002221 }
2222 return MatchOperand_NoMatch;
2223}
2224
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002225AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2226{
2227 StringRef Name = Parser.getTok().getString();
2228 if (Name == "mul") {
Sam Kolton11de3702016-05-24 12:38:33 +00002229 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002230 } else if (Name == "div") {
Sam Kolton11de3702016-05-24 12:38:33 +00002231 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002232 } else {
2233 return MatchOperand_NoMatch;
2234 }
2235}
2236
Sam Kolton5f10a132016-05-06 11:31:17 +00002237AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
2238 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
2239}
2240
2241AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
2242 return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
2243}
2244
Tom Stellarda90b9522016-02-11 03:28:15 +00002245void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2246 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002247 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002248 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002249 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2250 }
2251 for (unsigned E = Operands.size(); I != E; ++I)
2252 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2253}
2254
2255void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002256 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2257 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002258 cvtVOP3(Inst, Operands);
2259 } else {
2260 cvtId(Inst, Operands);
2261 }
2262}
2263
Tom Stellarda90b9522016-02-11 03:28:15 +00002264void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002265 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00002266 unsigned I = 1;
2267 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002268 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002269 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00002270 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002271
Tom Stellarda90b9522016-02-11 03:28:15 +00002272 for (unsigned E = Operands.size(); I != E; ++I) {
2273 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00002274 if (Op.isRegOrImmWithInputMods()) {
2275 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002276 } else if (Op.isImm()) {
2277 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00002278 } else {
2279 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002280 }
Tom Stellarda90b9522016-02-11 03:28:15 +00002281 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002282
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002283 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2284 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002285}
2286
Sam Koltondfa29f72016-03-09 12:29:31 +00002287//===----------------------------------------------------------------------===//
2288// dpp
2289//===----------------------------------------------------------------------===//
2290
2291bool AMDGPUOperand::isDPPCtrl() const {
2292 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2293 if (result) {
2294 int64_t Imm = getImm();
2295 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2296 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2297 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2298 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2299 (Imm == 0x130) ||
2300 (Imm == 0x134) ||
2301 (Imm == 0x138) ||
2302 (Imm == 0x13c) ||
2303 (Imm == 0x140) ||
2304 (Imm == 0x141) ||
2305 (Imm == 0x142) ||
2306 (Imm == 0x143);
2307 }
2308 return false;
2309}
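// For reference, the ranges accepted above match the dpp_ctrl values produced
// by parseDPPCtrl below: 0x000-0x0ff quad_perm, 0x101-0x10f row_shl,
// 0x111-0x11f row_shr, 0x121-0x12f row_ror, 0x130/0x134/0x138/0x13c the
// wave_* shifts and rotates, 0x140 row_mirror, 0x141 row_half_mirror and
// 0x142/0x143 row_bcast:15/31.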
2310
Sam Koltona74cd522016-03-18 15:35:51 +00002311AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002312AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002313 SMLoc S = Parser.getTok().getLoc();
2314 StringRef Prefix;
2315 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00002316
Sam Koltona74cd522016-03-18 15:35:51 +00002317 if (getLexer().getKind() == AsmToken::Identifier) {
2318 Prefix = Parser.getTok().getString();
2319 } else {
2320 return MatchOperand_NoMatch;
2321 }
2322
2323 if (Prefix == "row_mirror") {
2324 Int = 0x140;
2325 } else if (Prefix == "row_half_mirror") {
2326 Int = 0x141;
2327 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002328    // Check to prevent parseDPPCtrl from eating invalid tokens
2329 if (Prefix != "quad_perm"
2330 && Prefix != "row_shl"
2331 && Prefix != "row_shr"
2332 && Prefix != "row_ror"
2333 && Prefix != "wave_shl"
2334 && Prefix != "wave_rol"
2335 && Prefix != "wave_shr"
2336 && Prefix != "wave_ror"
2337 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00002338 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00002339 }
2340
Sam Koltona74cd522016-03-18 15:35:51 +00002341 Parser.Lex();
2342 if (getLexer().isNot(AsmToken::Colon))
2343 return MatchOperand_ParseFail;
2344
2345 if (Prefix == "quad_perm") {
2346 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00002347 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00002348 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00002349 return MatchOperand_ParseFail;
2350
2351 Parser.Lex();
2352 if (getLexer().isNot(AsmToken::Integer))
2353 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002354 Int = getLexer().getTok().getIntVal();
Sam Koltondfa29f72016-03-09 12:29:31 +00002355
Sam Koltona74cd522016-03-18 15:35:51 +00002356 Parser.Lex();
2357 if (getLexer().isNot(AsmToken::Comma))
Sam Koltondfa29f72016-03-09 12:29:31 +00002358 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002359 Parser.Lex();
2360 if (getLexer().isNot(AsmToken::Integer))
2361 return MatchOperand_ParseFail;
2362 Int += (getLexer().getTok().getIntVal() << 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00002363
Sam Koltona74cd522016-03-18 15:35:51 +00002364 Parser.Lex();
2365 if (getLexer().isNot(AsmToken::Comma))
2366 return MatchOperand_ParseFail;
2367 Parser.Lex();
2368 if (getLexer().isNot(AsmToken::Integer))
2369 return MatchOperand_ParseFail;
2370 Int += (getLexer().getTok().getIntVal() << 4);
2371
2372 Parser.Lex();
2373 if (getLexer().isNot(AsmToken::Comma))
2374 return MatchOperand_ParseFail;
2375 Parser.Lex();
2376 if (getLexer().isNot(AsmToken::Integer))
2377 return MatchOperand_ParseFail;
2378 Int += (getLexer().getTok().getIntVal() << 6);
2379
2380 Parser.Lex();
2381 if (getLexer().isNot(AsmToken::RBrac))
2382 return MatchOperand_ParseFail;
2383
2384 } else {
2385 // sel:%d
2386 Parser.Lex();
2387 if (getLexer().isNot(AsmToken::Integer))
2388 return MatchOperand_ParseFail;
2389 Int = getLexer().getTok().getIntVal();
2390
2391 if (Prefix == "row_shl") {
2392 Int |= 0x100;
2393 } else if (Prefix == "row_shr") {
2394 Int |= 0x110;
2395 } else if (Prefix == "row_ror") {
2396 Int |= 0x120;
2397 } else if (Prefix == "wave_shl") {
2398 Int = 0x130;
2399 } else if (Prefix == "wave_rol") {
2400 Int = 0x134;
2401 } else if (Prefix == "wave_shr") {
2402 Int = 0x138;
2403 } else if (Prefix == "wave_ror") {
2404 Int = 0x13C;
2405 } else if (Prefix == "row_bcast") {
2406 if (Int == 15) {
2407 Int = 0x142;
2408 } else if (Int == 31) {
2409 Int = 0x143;
2410 }
2411 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002412 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002413 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002414 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002415 }
Sam Koltona74cd522016-03-18 15:35:51 +00002416 Parser.Lex(); // eat last token
2417
2418 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
Sam Koltondfa29f72016-03-09 12:29:31 +00002419 AMDGPUOperand::ImmTyDppCtrl));
2420 return MatchOperand_Success;
2421}
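// Worked example: "quad_perm:[0,1,2,3]" packs the four 2-bit lane selects as
// 0 | (1 << 2) | (2 << 4) | (3 << 6) == 0xE4, while "row_shl:1" becomes 0x101
// and "bound_ctrl:0" is converted to the immediate 1 by ConvertBoundCtrl in
// the optional-operand table above.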
2422
Sam Kolton5f10a132016-05-06 11:31:17 +00002423AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2424 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002425}
2426
Sam Kolton5f10a132016-05-06 11:31:17 +00002427AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2428 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002429}
2430
Sam Kolton5f10a132016-05-06 11:31:17 +00002431AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2432 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2433}
2434
2435void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002436 OptionalImmIndexMap OptionalIdx;
2437
2438 unsigned I = 1;
2439 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2440 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2441 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2442 }
2443
2444 for (unsigned E = Operands.size(); I != E; ++I) {
2445 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2446 // Add the register arguments
Sam Kolton5f10a132016-05-06 11:31:17 +00002447 if (Op.isRegOrImmWithInputMods()) {
2448 // We convert only instructions with modifiers
Sam Koltondfa29f72016-03-09 12:29:31 +00002449 Op.addRegOrImmWithInputModsOperands(Inst, 2);
2450 } else if (Op.isDPPCtrl()) {
2451 Op.addImmOperands(Inst, 1);
2452 } else if (Op.isImm()) {
2453 // Handle optional arguments
2454 OptionalIdx[Op.getImmTy()] = I;
2455 } else {
2456 llvm_unreachable("Invalid operand type");
2457 }
2458 }
2459
2460 // ToDo: fix default values for row_mask and bank_mask
2461 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2462 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2463 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2464}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002465
Sam Kolton3025e7f2016-04-26 13:33:56 +00002466//===----------------------------------------------------------------------===//
2467// sdwa
2468//===----------------------------------------------------------------------===//
2469
2470AMDGPUAsmParser::OperandMatchResultTy
2471AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2472 SMLoc S = Parser.getTok().getLoc();
2473 StringRef Value;
2474 AMDGPUAsmParser::OperandMatchResultTy res;
2475
2476 res = parseStringWithPrefix("dst_sel", Value);
2477 if (res == MatchOperand_ParseFail) {
2478 return MatchOperand_ParseFail;
2479 } else if (res == MatchOperand_NoMatch) {
2480 res = parseStringWithPrefix("src0_sel", Value);
2481 if (res == MatchOperand_ParseFail) {
2482 return MatchOperand_ParseFail;
2483 } else if (res == MatchOperand_NoMatch) {
2484 res = parseStringWithPrefix("src1_sel", Value);
2485 if (res != MatchOperand_Success) {
2486 return res;
2487 }
2488 }
2489 }
2490
2491 int64_t Int;
2492 Int = StringSwitch<int64_t>(Value)
2493 .Case("BYTE_0", 0)
2494 .Case("BYTE_1", 1)
2495 .Case("BYTE_2", 2)
2496 .Case("BYTE_3", 3)
2497 .Case("WORD_0", 4)
2498 .Case("WORD_1", 5)
2499 .Case("DWORD", 6)
2500 .Default(0xffffffff);
2501 Parser.Lex(); // eat last token
2502
2503 if (Int == 0xffffffff) {
2504 return MatchOperand_ParseFail;
2505 }
2506
2507 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2508 AMDGPUOperand::ImmTySdwaSel));
2509 return MatchOperand_Success;
2510}
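// Example: "dst_sel:WORD_1" and "src0_sel:BYTE_2" map to the immediates 5 and
// 2 respectively; "DWORD" (6) is also the value produced by defaultSDWASel()
// below when the selector is omitted.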
2511
2512AMDGPUAsmParser::OperandMatchResultTy
2513AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2514 SMLoc S = Parser.getTok().getLoc();
2515 StringRef Value;
2516 AMDGPUAsmParser::OperandMatchResultTy res;
2517
2518 res = parseStringWithPrefix("dst_unused", Value);
2519 if (res != MatchOperand_Success) {
2520 return res;
2521 }
2522
2523 int64_t Int;
2524 Int = StringSwitch<int64_t>(Value)
2525 .Case("UNUSED_PAD", 0)
2526 .Case("UNUSED_SEXT", 1)
2527 .Case("UNUSED_PRESERVE", 2)
2528 .Default(0xffffffff);
2529 Parser.Lex(); // eat last token
2530
2531 if (Int == 0xffffffff) {
2532 return MatchOperand_ParseFail;
2533 }
2534
2535 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2536 AMDGPUOperand::ImmTySdwaDstUnused));
2537 return MatchOperand_Success;
2538}
2539
Sam Kolton5f10a132016-05-06 11:31:17 +00002540AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
2541 return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
2542}
2543
2544AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
2545 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
2546}
2547
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002548
Tom Stellard45bb48e2015-06-13 03:28:10 +00002549/// Force static initialization.
2550extern "C" void LLVMInitializeAMDGPUAsmParser() {
2551 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
2552 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
2553}
2554
2555#define GET_REGISTER_MATCHER
2556#define GET_MATCHER_IMPLEMENTATION
2557#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00002558
2559
2560// This function should be defined after the auto-generated include so that we
2561// have the MatchClassKind enum defined.
2562unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
2563 unsigned Kind) {
2564  // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
2565  // But MatchInstructionImpl() expects a token and fails to validate the
2566  // operand. This method checks whether we were given an immediate operand when
2567  // the matcher expects the corresponding token.
2568 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
2569 switch (Kind) {
2570 case MCK_addr64:
2571 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
2572 case MCK_gds:
2573 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
2574 case MCK_glc:
2575 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
2576 case MCK_idxen:
2577 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
2578 case MCK_offen:
2579 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
2580 default: return Match_InvalidOperand;
2581 }
2582}