blob: 105074b915896dfe1086de024dc5873aa131328a [file] [log] [blame]
//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
Artem Tamazovebe71ce2016-05-06 17:48:48 +000041
Tom Stellard45bb48e2015-06-13 03:28:10 +000042using namespace llvm;
43
44namespace {
45
46struct OptionalOperand;
47
// Kind of register reference being parsed: an architectural VGPR/SGPR/TTMP
// range, or a named special register (vcc, exec, m0, ...). IS_UNKNOWN marks
// a not-yet-classified reference.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
49
Tom Stellard45bb48e2015-06-13 03:28:10 +000050class AMDGPUOperand : public MCParsedAsmOperand {
51 enum KindTy {
52 Token,
53 Immediate,
54 Register,
55 Expression
56 } Kind;
57
58 SMLoc StartLoc, EndLoc;
59
60public:
61 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
62
63 MCContext *Ctx;
64
Sam Kolton5f10a132016-05-06 11:31:17 +000065 typedef std::unique_ptr<AMDGPUOperand> Ptr;
66
Tom Stellard45bb48e2015-06-13 03:28:10 +000067 enum ImmTy {
68 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +000069 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +000070 ImmTyOffen,
71 ImmTyIdxen,
72 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +000073 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +000074 ImmTyOffset0,
75 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +000076 ImmTyGLC,
77 ImmTySLC,
78 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +000079 ImmTyClampSI,
80 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +000081 ImmTyDppCtrl,
82 ImmTyDppRowMask,
83 ImmTyDppBankMask,
84 ImmTyDppBoundCtrl,
Sam Kolton3025e7f2016-04-26 13:33:56 +000085 ImmTySdwaSel,
86 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +000087 ImmTyDMask,
88 ImmTyUNorm,
89 ImmTyDA,
90 ImmTyR128,
91 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +000092 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +000093 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +000094 };
95
96 struct TokOp {
97 const char *Data;
98 unsigned Length;
99 };
100
101 struct ImmOp {
102 bool IsFPImm;
103 ImmTy Type;
104 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000105 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000106 };
107
108 struct RegOp {
109 unsigned RegNo;
110 int Modifiers;
111 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000112 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000113 bool IsForcedVOP3;
114 };
115
116 union {
117 TokOp Tok;
118 ImmOp Imm;
119 RegOp Reg;
120 const MCExpr *Expr;
121 };
122
Sam Kolton1bdcef72016-05-23 09:59:02 +0000123 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
124 if (Imm.Type == ImmTyNone && ApplyModifiers && Imm.Modifiers != 0) {
125 // Apply modifiers to immediate value
126 int64_t Val = Imm.Val;
127 bool Negate = Imm.Modifiers & 0x1;
128 bool Abs = Imm.Modifiers & 0x2;
129 if (Imm.IsFPImm) {
130 APFloat F(BitsToFloat(Val));
131 if (Abs) {
132 F.clearSign();
133 }
134 if (Negate) {
135 F.changeSign();
136 }
137 Val = F.bitcastToAPInt().getZExtValue();
138 } else {
139 Val = Abs ? std::abs(Val) : Val;
140 Val = Negate ? -Val : Val;
141 }
142 Inst.addOperand(MCOperand::createImm(Val));
143 } else {
144 Inst.addOperand(MCOperand::createImm(getImm()));
145 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000146 }
147
148 StringRef getToken() const {
149 return StringRef(Tok.Data, Tok.Length);
150 }
151
152 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000153 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000154 }
155
156 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000157 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000158 addRegOperands(Inst, N);
159 else
160 addImmOperands(Inst, N);
161 }
162
Tom Stellardd93a34f2016-02-22 19:17:56 +0000163 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
164 if (isRegKind()) {
165 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
166 addRegOperands(Inst, N);
167 } else {
168 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
Sam Kolton1bdcef72016-05-23 09:59:02 +0000169 addImmOperands(Inst, N, false);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000170 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000171 }
172
173 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
174 if (isImm())
175 addImmOperands(Inst, N);
176 else {
177 assert(isExpr());
178 Inst.addOperand(MCOperand::createExpr(Expr));
179 }
180 }
181
Tom Stellard45bb48e2015-06-13 03:28:10 +0000182 bool isToken() const override {
183 return Kind == Token;
184 }
185
186 bool isImm() const override {
187 return Kind == Immediate;
188 }
189
Tom Stellardd93a34f2016-02-22 19:17:56 +0000190 bool isInlinableImm() const {
191 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
192 immediates are inlinable (e.g. "clamp" attribute is not) */ )
193 return false;
194 // TODO: We should avoid using host float here. It would be better to
Sam Koltona74cd522016-03-18 15:35:51 +0000195 // check the float bit values which is what a few other places do.
Tom Stellardd93a34f2016-02-22 19:17:56 +0000196 // We've had bot failures before due to weird NaN support on mips hosts.
197 const float F = BitsToFloat(Imm.Val);
198 // TODO: Add 1/(2*pi) for VI
199 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000200 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000201 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000202 }
203
Tom Stellard45bb48e2015-06-13 03:28:10 +0000204 int64_t getImm() const {
205 return Imm.Val;
206 }
207
208 enum ImmTy getImmTy() const {
209 assert(isImm());
210 return Imm.Type;
211 }
212
213 bool isRegKind() const {
214 return Kind == Register;
215 }
216
217 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000218 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000219 }
220
Tom Stellardd93a34f2016-02-22 19:17:56 +0000221 bool isRegOrImmWithInputMods() const {
222 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000223 }
224
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000225 bool isImmTy(ImmTy ImmT) const {
226 return isImm() && Imm.Type == ImmT;
227 }
228
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000229 bool isClampSI() const {
230 return isImmTy(ImmTyClampSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000231 }
232
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000233 bool isOModSI() const {
234 return isImmTy(ImmTyOModSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000235 }
236
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000237 bool isImmModifier() const {
238 return Kind == Immediate && Imm.Type != ImmTyNone;
239 }
240
241 bool isDMask() const {
242 return isImmTy(ImmTyDMask);
243 }
244
245 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
246 bool isDA() const { return isImmTy(ImmTyDA); }
247 bool isR128() const { return isImmTy(ImmTyUNorm); }
248 bool isLWE() const { return isImmTy(ImmTyLWE); }
249
Tom Stellarda90b9522016-02-11 03:28:15 +0000250 bool isMod() const {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000251 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000252 }
253
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000254 bool isOffen() const { return isImmTy(ImmTyOffen); }
255 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
256 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
257 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
258 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
259 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000260 bool isGDS() const { return isImmTy(ImmTyGDS); }
261 bool isGLC() const { return isImmTy(ImmTyGLC); }
262 bool isSLC() const { return isImmTy(ImmTySLC); }
263 bool isTFE() const { return isImmTy(ImmTyTFE); }
264
Sam Koltondfa29f72016-03-09 12:29:31 +0000265 bool isBankMask() const {
266 return isImmTy(ImmTyDppBankMask);
267 }
268
269 bool isRowMask() const {
270 return isImmTy(ImmTyDppRowMask);
271 }
272
273 bool isBoundCtrl() const {
274 return isImmTy(ImmTyDppBoundCtrl);
275 }
Sam Koltona74cd522016-03-18 15:35:51 +0000276
Sam Kolton3025e7f2016-04-26 13:33:56 +0000277 bool isSDWASel() const {
278 return isImmTy(ImmTySdwaSel);
279 }
280
281 bool isSDWADstUnused() const {
282 return isImmTy(ImmTySdwaDstUnused);
283 }
284
Tom Stellard45bb48e2015-06-13 03:28:10 +0000285 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000286 assert(isReg() || (isImm() && Imm.Modifiers == 0));
287 if (isReg())
288 Reg.Modifiers = Mods;
289 else
290 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000291 }
292
293 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000294 assert(isRegKind() || isImm());
295 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000296 }
297
298 unsigned getReg() const override {
299 return Reg.RegNo;
300 }
301
302 bool isRegOrImm() const {
303 return isReg() || isImm();
304 }
305
306 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000307 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000308 }
309
310 bool isSCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000311 return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000312 }
313
Matt Arsenault86d336e2015-09-08 21:15:00 +0000314 bool isSCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000315 return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000316 }
317
318 bool isSSrc32() const {
319 return isImm() || isSCSrc32();
320 }
321
322 bool isSSrc64() const {
323 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
324 // See isVSrc64().
325 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000326 }
327
Tom Stellard45bb48e2015-06-13 03:28:10 +0000328 bool isVCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000329 return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000330 }
331
332 bool isVCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000333 return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000334 }
335
336 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000337 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000338 }
339
340 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000341 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000342 // narrowed to 32 bits (in the instruction stream). That require knowledge
343 // of instruction type (unsigned/signed, floating or "untyped"/B64),
344 // see [AMD GCN3 ISA 6.3.1].
345 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
346 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000347 }
348
349 bool isMem() const override {
350 return false;
351 }
352
353 bool isExpr() const {
354 return Kind == Expression;
355 }
356
357 bool isSoppBrTarget() const {
358 return isExpr() || isImm();
359 }
360
361 SMLoc getStartLoc() const override {
362 return StartLoc;
363 }
364
365 SMLoc getEndLoc() const override {
366 return EndLoc;
367 }
368
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000369 void printImmTy(raw_ostream& OS, ImmTy Type) const {
370 switch (Type) {
371 case ImmTyNone: OS << "None"; break;
372 case ImmTyGDS: OS << "GDS"; break;
373 case ImmTyOffen: OS << "Offen"; break;
374 case ImmTyIdxen: OS << "Idxen"; break;
375 case ImmTyAddr64: OS << "Addr64"; break;
376 case ImmTyOffset: OS << "Offset"; break;
377 case ImmTyOffset0: OS << "Offset0"; break;
378 case ImmTyOffset1: OS << "Offset1"; break;
379 case ImmTyGLC: OS << "GLC"; break;
380 case ImmTySLC: OS << "SLC"; break;
381 case ImmTyTFE: OS << "TFE"; break;
382 case ImmTyClampSI: OS << "ClampSI"; break;
383 case ImmTyOModSI: OS << "OModSI"; break;
384 case ImmTyDppCtrl: OS << "DppCtrl"; break;
385 case ImmTyDppRowMask: OS << "DppRowMask"; break;
386 case ImmTyDppBankMask: OS << "DppBankMask"; break;
387 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
388 case ImmTySdwaSel: OS << "SdwaSel"; break;
389 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
390 case ImmTyDMask: OS << "DMask"; break;
391 case ImmTyUNorm: OS << "UNorm"; break;
392 case ImmTyDA: OS << "DA"; break;
393 case ImmTyR128: OS << "R128"; break;
394 case ImmTyLWE: OS << "LWE"; break;
395 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000396 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000397 }
398 }
399
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000400 void print(raw_ostream &OS) const override {
401 switch (Kind) {
402 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000403 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000404 break;
405 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000406 OS << '<' << getImm();
407 if (getImmTy() != ImmTyNone) {
408 OS << " type: "; printImmTy(OS, getImmTy());
409 }
410 OS << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000411 break;
412 case Token:
413 OS << '\'' << getToken() << '\'';
414 break;
415 case Expression:
416 OS << "<expr " << *Expr << '>';
417 break;
418 }
419 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000420
Sam Kolton5f10a132016-05-06 11:31:17 +0000421 static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
422 enum ImmTy Type = ImmTyNone,
423 bool IsFPImm = false) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000424 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
425 Op->Imm.Val = Val;
426 Op->Imm.IsFPImm = IsFPImm;
427 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000428 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000429 Op->StartLoc = Loc;
430 Op->EndLoc = Loc;
431 return Op;
432 }
433
Sam Kolton5f10a132016-05-06 11:31:17 +0000434 static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
435 bool HasExplicitEncodingSize = true) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000436 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
437 Res->Tok.Data = Str.data();
438 Res->Tok.Length = Str.size();
439 Res->StartLoc = Loc;
440 Res->EndLoc = Loc;
441 return Res;
442 }
443
Sam Kolton5f10a132016-05-06 11:31:17 +0000444 static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
445 SMLoc E,
446 const MCRegisterInfo *TRI,
447 const MCSubtargetInfo *STI,
448 bool ForceVOP3) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000449 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
450 Op->Reg.RegNo = RegNo;
451 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000452 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000453 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000454 Op->Reg.IsForcedVOP3 = ForceVOP3;
455 Op->StartLoc = S;
456 Op->EndLoc = E;
457 return Op;
458 }
459
Sam Kolton5f10a132016-05-06 11:31:17 +0000460 static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000461 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
462 Op->Expr = Expr;
463 Op->StartLoc = S;
464 Op->EndLoc = S;
465 return Op;
466 }
467
Tom Stellard45bb48e2015-06-13 03:28:10 +0000468 bool isSWaitCnt() const;
Artem Tamazovd6468662016-04-25 14:13:51 +0000469 bool isHwreg() const;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000470 bool isSendMsg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000471 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000472 bool isSMRDOffset() const;
473 bool isSMRDLiteralOffset() const;
Sam Koltondfa29f72016-03-09 12:29:31 +0000474 bool isDPPCtrl() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000475};
476
/// Target assembly parser for AMDGPU (SI/CI/VI). Parses instructions,
/// optional/named operands, and the HSA/amd_kernel_code_t directives.
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // 0 = no forced encoding; 32/64 force e32/e64 encodings (set from the
  // mnemonic suffix). isForcedVOP3() below keys off this.
  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  // SGPR102/103 are not usable on VI.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  // Directive parsing (.hsa_code_object_*, .amd_kernel_code_t, HSA sections).
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  // Register-list parsing helpers.
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
  // Shared MUBUF operand-conversion implementation; see the cvtMubuf*
  // wrappers below.
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  // MCTargetAsmParser interface.
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  // Generic "prefix:value" / named-bit operand parsers.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                                          bool (*ConvertResult)(int64_t&) = 0);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);

  OperandMatchResultTy parseImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithInputMods(OperandVector &Operands);

  // DS instruction operand conversion.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);

  // s_waitcnt / s_sethreg / s_sendmsg operand parsing.
  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  // Id plus a flag recording whether the operand was written symbolically
  // (by name) rather than as a raw number.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);
public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultHwreg() const;

  // MUBUF conversion entry points (thin wrappers over cvtMubufImpl) and
  // default values for optional MUBUF operands.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  AMDGPUOperand::Ptr defaultMubufOffset() const;
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  // Defaults for optional MIMG / SMRD / VOP3 operands.
  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  AMDGPUOperand::Ptr defaultClampSI() const;
  AMDGPUOperand::Ptr defaultOModSI() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  // Operand conversion for VOP3 / MIMG encodings.
  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  // DPP support.
  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  // SDWA support.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultSDWASel() const;
  AMDGPUOperand::Ptr defaultSDWADstUnused() const;
};
651
// Table entry describing one optional/named operand: its assembly name, the
// ImmTy it produces, whether it is a bare flag bit (no ":value" part), and an
// optional hook that converts/validates the parsed value.
struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};
658
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000659}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000660
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000661static int getRegClass(RegisterKind Is, unsigned RegWidth) {
662 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000663 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000664 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000665 case 1: return AMDGPU::VGPR_32RegClassID;
666 case 2: return AMDGPU::VReg_64RegClassID;
667 case 3: return AMDGPU::VReg_96RegClassID;
668 case 4: return AMDGPU::VReg_128RegClassID;
669 case 8: return AMDGPU::VReg_256RegClassID;
670 case 16: return AMDGPU::VReg_512RegClassID;
671 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000672 } else if (Is == IS_TTMP) {
673 switch (RegWidth) {
674 default: return -1;
675 case 1: return AMDGPU::TTMP_32RegClassID;
676 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000677 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000678 }
679 } else if (Is == IS_SGPR) {
680 switch (RegWidth) {
681 default: return -1;
682 case 1: return AMDGPU::SGPR_32RegClassID;
683 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000684 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000685 case 8: return AMDGPU::SReg_256RegClassID;
686 case 16: return AMDGPU::SReg_512RegClassID;
687 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000688 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000689 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000690}
691
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000692static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000693 return StringSwitch<unsigned>(RegName)
694 .Case("exec", AMDGPU::EXEC)
695 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000696 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000697 .Case("m0", AMDGPU::M0)
698 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000699 .Case("tba", AMDGPU::TBA)
700 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000701 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
702 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000703 .Case("vcc_lo", AMDGPU::VCC_LO)
704 .Case("vcc_hi", AMDGPU::VCC_HI)
705 .Case("exec_lo", AMDGPU::EXEC_LO)
706 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000707 .Case("tma_lo", AMDGPU::TMA_LO)
708 .Case("tma_hi", AMDGPU::TMA_HI)
709 .Case("tba_lo", AMDGPU::TBA_LO)
710 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000711 .Default(0);
712}
713
714bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000715 auto R = parseRegister();
716 if (!R) return true;
717 assert(R->isReg());
718 RegNo = R->getReg();
719 StartLoc = R->getStartLoc();
720 EndLoc = R->getEndLoc();
721 return false;
722}
723
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000724bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
725{
726 switch (RegKind) {
727 case IS_SPECIAL:
728 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
729 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
730 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
731 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
732 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
733 return false;
734 case IS_VGPR:
735 case IS_SGPR:
736 case IS_TTMP:
737 if (Reg1 != Reg + RegWidth) { return false; }
738 RegWidth++;
739 return true;
740 default:
741 assert(false); return false;
742 }
743}
744
// Parse one register reference in any of the accepted syntaxes:
//   - a special register name ("vcc", "exec", ...),
//   - a single numbered register ("v0", "s5", "ttmp2"),
//   - a range ("v[8:11]"),
//   - a bracketed list of consecutive registers ("[s0,s1,s2,s3]").
// On success fills RegKind/Reg/RegNum/RegWidth and returns true; on any
// syntax or validity error returns false (no diagnostic is emitted here).
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: 'v', 's', or "ttmp"; RegNumIndex is where the
      // numeric part (if any) starts within the identifier.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. The identifier was just the bare
        // prefix, so the bracketed bounds follow as separate tokens.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        if (getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

        if (getLexer().isNot(AsmToken::RBrac)) { return false; }
        Parser.Lex();

        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    // Recursively parse each element; every element must itself be a single
    // (width-1) register of the same kind as the first one.
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Translate (kind, number, width) into an actual MC register and validate
  // alignment and range against the register class.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    // The first register of a tuple must be aligned to the tuple size;
    // RegNum becomes the index within the register class.
    if (RegNum % Size != 0) { return false; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  // Finally reject registers the current subtarget does not have.
  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}
840
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000841std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000842 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000843 SMLoc StartLoc = Tok.getLoc();
844 SMLoc EndLoc = Tok.getEndLoc();
Matt Arsenault3b159672015-12-01 20:31:08 +0000845 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
846
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000847 RegisterKind RegKind;
848 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000849
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000850 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
851 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000852 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000853 return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000854 TRI, &getSTI(), false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000855}
856
Sam Kolton1bdcef72016-05-23 09:59:02 +0000857AMDGPUAsmParser::OperandMatchResultTy
858AMDGPUAsmParser::parseImm(OperandVector &Operands) {
859 bool Minus = false;
860 if (getLexer().getKind() == AsmToken::Minus) {
861 Minus = true;
862 Parser.Lex();
863 }
864
865 SMLoc S = Parser.getTok().getLoc();
866 switch(getLexer().getKind()) {
867 case AsmToken::Integer: {
868 int64_t IntVal;
869 if (getParser().parseAbsoluteExpression(IntVal))
870 return MatchOperand_ParseFail;
871 if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
872 Error(S, "invalid immediate: only 32-bit values are legal");
873 return MatchOperand_ParseFail;
874 }
875
876 if (Minus)
877 IntVal *= -1;
878 Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
879 return MatchOperand_Success;
880 }
881 case AsmToken::Real: {
882 // FIXME: We should emit an error if a double precisions floating-point
883 // value is used. I'm not sure the best way to detect this.
884 int64_t IntVal;
885 if (getParser().parseAbsoluteExpression(IntVal))
886 return MatchOperand_ParseFail;
887
888 APFloat F((float)BitsToDouble(IntVal));
889 if (Minus)
890 F.changeSign();
891 Operands.push_back(
892 AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
893 AMDGPUOperand::ImmTyNone, true));
894 return MatchOperand_Success;
895 }
896 default:
897 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
898 }
899}
900
901AMDGPUAsmParser::OperandMatchResultTy
902AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
903 auto res = parseImm(Operands);
904 if (res != MatchOperand_NoMatch) {
905 return res;
906 }
907
908 if (auto R = parseRegister()) {
909 assert(R->isReg());
910 R->Reg.IsForcedVOP3 = isForcedVOP3();
911 Operands.push_back(std::move(R));
912 return MatchOperand_Success;
913 }
914 return MatchOperand_ParseFail;
915}
916
// Parse a register or immediate together with optional input modifiers:
//   -x        negate        (Modifiers bit 0)
//   |x|       absolute value, pipe syntax   (Modifiers bit 1, flag Abs)
//   abs(x)    absolute value, function syntax (Modifiers bit 1, flag Abs2)
// The two abs spellings are mutually exclusive; the closing '|' or ')' is
// required once the opening form was seen.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(" ... ")" function-style absolute value.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // "|" ... "|" pipe-style absolute value; cannot be nested inside abs(...).
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Accumulate the modifier mask: bit 0 = negate, bit 1 = abs.
  unsigned Modifiers = 0;
  if (Negate) {
    Modifiers |= 0x1;
  }
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2;
  }

  // Attach the modifiers to the operand that parseRegOrImm just pushed.
  if (Modifiers) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Modifiers);
  }
  return MatchOperand_Success;
}
980
981
Tom Stellard45bb48e2015-06-13 03:28:10 +0000982unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
983
984 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
985
986 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
987 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
988 return Match_InvalidOperand;
989
Tom Stellard88e0b252015-10-06 15:57:53 +0000990 if ((TSFlags & SIInstrFlags::VOP3) &&
991 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
992 getForcedEncodingSize() != 64)
993 return Match_PreferE32;
994
Tom Stellard45bb48e2015-06-13 03:28:10 +0000995 return Match_Success;
996}
997
Tom Stellard45bb48e2015-06-13 03:28:10 +0000998bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
999 OperandVector &Operands,
1000 MCStreamer &Out,
1001 uint64_t &ErrorInfo,
1002 bool MatchingInlineAsm) {
1003 MCInst Inst;
1004
Ranjeet Singh86ecbb72015-06-30 12:32:53 +00001005 switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001006 default: break;
1007 case Match_Success:
1008 Inst.setLoc(IDLoc);
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001009 Out.EmitInstruction(Inst, getSTI());
Tom Stellard45bb48e2015-06-13 03:28:10 +00001010 return false;
1011 case Match_MissingFeature:
1012 return Error(IDLoc, "instruction not supported on this GPU");
1013
1014 case Match_MnemonicFail:
1015 return Error(IDLoc, "unrecognized instruction mnemonic");
1016
1017 case Match_InvalidOperand: {
1018 SMLoc ErrorLoc = IDLoc;
1019 if (ErrorInfo != ~0ULL) {
1020 if (ErrorInfo >= Operands.size()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001021 return Error(IDLoc, "too few operands for instruction");
1022 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001023 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
1024 if (ErrorLoc == SMLoc())
1025 ErrorLoc = IDLoc;
1026 }
1027 return Error(ErrorLoc, "invalid operand for instruction");
1028 }
Tom Stellard88e0b252015-10-06 15:57:53 +00001029 case Match_PreferE32:
1030 return Error(IDLoc, "internal error: instruction without _e64 suffix "
1031 "should be encoded as e32");
Tom Stellard45bb48e2015-06-13 03:28:10 +00001032 }
1033 llvm_unreachable("Implement any new match types added!");
1034}
1035
Tom Stellard347ac792015-06-26 21:15:07 +00001036bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1037 uint32_t &Minor) {
1038 if (getLexer().isNot(AsmToken::Integer))
1039 return TokError("invalid major version");
1040
1041 Major = getLexer().getTok().getIntVal();
1042 Lex();
1043
1044 if (getLexer().isNot(AsmToken::Comma))
1045 return TokError("minor version number required, comma expected");
1046 Lex();
1047
1048 if (getLexer().isNot(AsmToken::Integer))
1049 return TokError("invalid minor version");
1050
1051 Minor = getLexer().getTok().getIntVal();
1052 Lex();
1053
1054 return false;
1055}
1056
1057bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1058
1059 uint32_t Major;
1060 uint32_t Minor;
1061
1062 if (ParseDirectiveMajorMinor(Major, Minor))
1063 return true;
1064
1065 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1066 return false;
1067}
1068
// Handle .hsa_code_object_isa, either with no arguments (derive the ISA
// version from the current subtarget) or with the full
// "<major>, <minor>, <stepping>, "<vendor>", "<arch>"" argument list.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  // Explicit form: major and minor are shared with the version directive.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  // getStringContents() strips the surrounding quotes.
  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1125
Tom Stellardff7416b2015-06-26 21:58:31 +00001126bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1127 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001128 SmallString<40> ErrStr;
1129 raw_svector_ostream Err(ErrStr);
1130 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
1131 return TokError(Err.str());
1132 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001133 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001134 return false;
1135}
1136
// Handle .amd_kernel_code_t: parse one "<field> = <value>" line at a time,
// starting from subtarget defaults, until .end_amd_kernel_code_t is seen,
// then emit the completed header through the target streamer.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Each value must start on its own line.
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    // The terminator is lexed as an identifier too.
    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1169
Tom Stellarde135ffd2015-09-25 21:41:28 +00001170bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1171 getParser().getStreamer().SwitchSection(
1172 AMDGPU::getHSATextSection(getContext()));
1173 return false;
1174}
1175
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001176bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1177 if (getLexer().isNot(AsmToken::Identifier))
1178 return TokError("expected symbol name");
1179
1180 StringRef KernelName = Parser.getTok().getString();
1181
1182 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1183 ELF::STT_AMDGPU_HSA_KERNEL);
1184 Lex();
1185 return false;
1186}
1187
Tom Stellard00f2f912015-12-02 19:47:57 +00001188bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1189 if (getLexer().isNot(AsmToken::Identifier))
1190 return TokError("expected symbol name");
1191
1192 StringRef GlobalName = Parser.getTok().getIdentifier();
1193
1194 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1195 Lex();
1196 return false;
1197}
1198
1199bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1200 if (getLexer().isNot(AsmToken::Identifier))
1201 return TokError("expected symbol name");
1202
1203 StringRef GlobalName = Parser.getTok().getIdentifier();
1204
1205 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1206 Lex();
1207 return false;
1208}
1209
1210bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1211 getParser().getStreamer().SwitchSection(
1212 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1213 return false;
1214}
1215
1216bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1217 getParser().getStreamer().SwitchSection(
1218 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1219 return false;
1220}
1221
Tom Stellard9760f032015-12-03 03:34:32 +00001222bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1223 getParser().getStreamer().SwitchSection(
1224 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1225 return false;
1226}
1227
Tom Stellard45bb48e2015-06-13 03:28:10 +00001228bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001229 StringRef IDVal = DirectiveID.getString();
1230
1231 if (IDVal == ".hsa_code_object_version")
1232 return ParseDirectiveHSACodeObjectVersion();
1233
1234 if (IDVal == ".hsa_code_object_isa")
1235 return ParseDirectiveHSACodeObjectISA();
1236
Tom Stellardff7416b2015-06-26 21:58:31 +00001237 if (IDVal == ".amd_kernel_code_t")
1238 return ParseDirectiveAMDKernelCodeT();
1239
Tom Stellardfcfaea42016-05-05 17:03:33 +00001240 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001241 return ParseSectionDirectiveHSAText();
1242
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001243 if (IDVal == ".amdgpu_hsa_kernel")
1244 return ParseDirectiveAMDGPUHsaKernel();
1245
Tom Stellard00f2f912015-12-02 19:47:57 +00001246 if (IDVal == ".amdgpu_hsa_module_global")
1247 return ParseDirectiveAMDGPUHsaModuleGlobal();
1248
1249 if (IDVal == ".amdgpu_hsa_program_global")
1250 return ParseDirectiveAMDGPUHsaProgramGlobal();
1251
1252 if (IDVal == ".hsadata_global_agent")
1253 return ParseSectionDirectiveHSADataGlobalAgent();
1254
1255 if (IDVal == ".hsadata_global_program")
1256 return ParseSectionDirectiveHSADataGlobalProgram();
1257
Tom Stellard9760f032015-12-03 03:34:32 +00001258 if (IDVal == ".hsarodata_readonly_agent")
1259 return ParseSectionDirectiveHSARodataReadonlyAgent();
1260
Tom Stellard45bb48e2015-06-13 03:28:10 +00001261 return true;
1262}
1263
Matt Arsenault68802d32015-11-05 03:11:27 +00001264bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1265 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001266 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001267 return true;
1268
Matt Arsenault3b159672015-12-01 20:31:08 +00001269 if (isSI()) {
1270 // No flat_scr
1271 switch (RegNo) {
1272 case AMDGPU::FLAT_SCR:
1273 case AMDGPU::FLAT_SCR_LO:
1274 case AMDGPU::FLAT_SCR_HI:
1275 return false;
1276 default:
1277 return true;
1278 }
1279 }
1280
Matt Arsenault68802d32015-11-05 03:11:27 +00001281 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1282 // SI/CI have.
1283 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1284 R.isValid(); ++R) {
1285 if (*R == RegNo)
1286 return false;
1287 }
1288
1289 return true;
1290}
1291
Tom Stellard45bb48e2015-06-13 03:28:10 +00001292AMDGPUAsmParser::OperandMatchResultTy
1293AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
1294
1295 // Try to parse with a custom parser
1296 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1297
1298 // If we successfully parsed the operand or if there as an error parsing,
1299 // we are done.
1300 //
1301 // If we are parsing after we reach EndOfStatement then this means we
1302 // are appending default values to the Operands list. This is only done
1303 // by custom parser, so we shouldn't continue on to the generic parsing.
Sam Kolton1bdcef72016-05-23 09:59:02 +00001304 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
Tom Stellard45bb48e2015-06-13 03:28:10 +00001305 getLexer().is(AsmToken::EndOfStatement))
1306 return ResTy;
1307
Sam Kolton1bdcef72016-05-23 09:59:02 +00001308 ResTy = parseRegOrImm(Operands);
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001309
Sam Kolton1bdcef72016-05-23 09:59:02 +00001310 if (ResTy == MatchOperand_Success)
1311 return ResTy;
1312
1313 if (getLexer().getKind() == AsmToken::Identifier) {
1314 const auto &Tok = Parser.getTok();
1315 Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001316 Parser.Lex();
Sam Kolton1bdcef72016-05-23 09:59:02 +00001317 return MatchOperand_Success;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001318 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00001319 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001320}
1321
// Parse a full instruction: record any forced _e32/_e64 encoding, push the
// mnemonic token (with its suffix intact), then parse operands until end of
// statement. Returns true after emitting a diagnostic on failure.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));


  // Strip the encoding suffix so operand parsing sees the base mnemonic.
  if (Name.endswith("_e64")) { Name = Name.substr(0, Name.size() - 4); }
  if (Name.endswith("_e32")) { Name = Name.substr(0, Name.size() - 4); }

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      // Skip the rest of the statement so the parser can resynchronize.
      Error(getLexer().getLoc(), "failed parsing operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
1367
1368//===----------------------------------------------------------------------===//
1369// Utility functions
1370//===----------------------------------------------------------------------===//
1371
1372AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001373AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001374 switch(getLexer().getKind()) {
1375 default: return MatchOperand_NoMatch;
1376 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001377 StringRef Name = Parser.getTok().getString();
1378 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001379 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001380 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001381
1382 Parser.Lex();
1383 if (getLexer().isNot(AsmToken::Colon))
1384 return MatchOperand_ParseFail;
1385
1386 Parser.Lex();
1387 if (getLexer().isNot(AsmToken::Integer))
1388 return MatchOperand_ParseFail;
1389
1390 if (getParser().parseAbsoluteExpression(Int))
1391 return MatchOperand_ParseFail;
1392 break;
1393 }
1394 }
1395 return MatchOperand_Success;
1396}
1397
1398AMDGPUAsmParser::OperandMatchResultTy
1399AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001400 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001401 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001402
1403 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001404 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001405
Sam Kolton11de3702016-05-24 12:38:33 +00001406 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001407 if (Res != MatchOperand_Success)
1408 return Res;
1409
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001410 if (ConvertResult && !ConvertResult(Value)) {
1411 return MatchOperand_ParseFail;
1412 }
1413
1414 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001415 return MatchOperand_Success;
1416}
1417
1418AMDGPUAsmParser::OperandMatchResultTy
1419AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +00001420 enum AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001421 int64_t Bit = 0;
1422 SMLoc S = Parser.getTok().getLoc();
1423
1424 // We are at the end of the statement, and this is a default argument, so
1425 // use a default value.
1426 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1427 switch(getLexer().getKind()) {
1428 case AsmToken::Identifier: {
1429 StringRef Tok = Parser.getTok().getString();
1430 if (Tok == Name) {
1431 Bit = 1;
1432 Parser.Lex();
1433 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1434 Bit = 0;
1435 Parser.Lex();
1436 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00001437 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001438 }
1439 break;
1440 }
1441 default:
1442 return MatchOperand_NoMatch;
1443 }
1444 }
1445
1446 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1447 return MatchOperand_Success;
1448}
1449
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001450typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1451
Sam Koltona74cd522016-03-18 15:35:51 +00001452void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1453 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001454 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001455 auto i = OptionalIdx.find(ImmT);
1456 if (i != OptionalIdx.end()) {
1457 unsigned Idx = i->second;
1458 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1459 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001460 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001461 }
1462}
1463
Sam Kolton3025e7f2016-04-26 13:33:56 +00001464AMDGPUAsmParser::OperandMatchResultTy
1465AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1466 if (getLexer().isNot(AsmToken::Identifier)) {
1467 return MatchOperand_NoMatch;
1468 }
1469 StringRef Tok = Parser.getTok().getString();
1470 if (Tok != Prefix) {
1471 return MatchOperand_NoMatch;
1472 }
1473
1474 Parser.Lex();
1475 if (getLexer().isNot(AsmToken::Colon)) {
1476 return MatchOperand_ParseFail;
1477 }
1478
1479 Parser.Lex();
1480 if (getLexer().isNot(AsmToken::Identifier)) {
1481 return MatchOperand_ParseFail;
1482 }
1483
1484 Value = Parser.getTok().getString();
1485 return MatchOperand_Success;
1486}
1487
Tom Stellard45bb48e2015-06-13 03:28:10 +00001488//===----------------------------------------------------------------------===//
1489// ds
1490//===----------------------------------------------------------------------===//
1491
Tom Stellard45bb48e2015-06-13 03:28:10 +00001492void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1493 const OperandVector &Operands) {
1494
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001495 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001496
1497 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1498 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1499
1500 // Add the register arguments
1501 if (Op.isReg()) {
1502 Op.addRegOperands(Inst, 1);
1503 continue;
1504 }
1505
1506 // Handle optional arguments
1507 OptionalIdx[Op.getImmTy()] = i;
1508 }
1509
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001510 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1511 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001512 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001513
Tom Stellard45bb48e2015-06-13 03:28:10 +00001514 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1515}
1516
1517void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1518
1519 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1520 bool GDSOnly = false;
1521
1522 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1523 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1524
1525 // Add the register arguments
1526 if (Op.isReg()) {
1527 Op.addRegOperands(Inst, 1);
1528 continue;
1529 }
1530
1531 if (Op.isToken() && Op.getToken() == "gds") {
1532 GDSOnly = true;
1533 continue;
1534 }
1535
1536 // Handle optional arguments
1537 OptionalIdx[Op.getImmTy()] = i;
1538 }
1539
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001540 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1541 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001542
1543 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001544 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001545 }
1546 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1547}
1548
1549
1550//===----------------------------------------------------------------------===//
1551// s_waitcnt
1552//===----------------------------------------------------------------------===//
1553
// Parse one "<name>(<value>)" group of an s_waitcnt operand and merge the
// value into IntVal at the counter's bit position:
//   vmcnt   -> mask 0xf, bits [3:0]
//   expcnt  -> mask 0x7, bits [6:4]
//   lgkmcnt -> mask 0xf, bits [11:8]
// A trailing '&' or ',' separator between groups is consumed here.
// Returns true on any syntax error or unknown counter name.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Eat an optional group separator.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  // Clear the counter's field, then insert the new value.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
1596
1597AMDGPUAsmParser::OperandMatchResultTy
1598AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1599 // Disable all counters by default.
1600 // vmcnt [3:0]
1601 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00001602 // lgkmcnt [11:8]
1603 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001604 SMLoc S = Parser.getTok().getLoc();
1605
1606 switch(getLexer().getKind()) {
1607 default: return MatchOperand_ParseFail;
1608 case AsmToken::Integer:
1609 // The operand can be an integer value.
1610 if (getParser().parseAbsoluteExpression(CntVal))
1611 return MatchOperand_ParseFail;
1612 break;
1613
1614 case AsmToken::Identifier:
1615 do {
1616 if (parseCnt(CntVal))
1617 return MatchOperand_ParseFail;
1618 } while(getLexer().isNot(AsmToken::EndOfStatement));
1619 break;
1620 }
1621 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1622 return MatchOperand_Success;
1623}
1624
Artem Tamazov6edc1352016-05-26 17:00:33 +00001625bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
1626 using namespace llvm::AMDGPU::Hwreg;
1627
Artem Tamazovd6468662016-04-25 14:13:51 +00001628 if (Parser.getTok().getString() != "hwreg")
1629 return true;
1630 Parser.Lex();
1631
1632 if (getLexer().isNot(AsmToken::LParen))
1633 return true;
1634 Parser.Lex();
1635
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001636 if (getLexer().is(AsmToken::Identifier)) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001637 HwReg.IsSymbolic = true;
1638 HwReg.Id = ID_UNKNOWN_;
1639 const StringRef tok = Parser.getTok().getString();
1640 for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
1641 if (tok == IdSymbolic[i]) {
1642 HwReg.Id = i;
1643 break;
1644 }
1645 }
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001646 Parser.Lex();
1647 } else {
Artem Tamazov6edc1352016-05-26 17:00:33 +00001648 HwReg.IsSymbolic = false;
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001649 if (getLexer().isNot(AsmToken::Integer))
1650 return true;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001651 if (getParser().parseAbsoluteExpression(HwReg.Id))
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001652 return true;
1653 }
Artem Tamazovd6468662016-04-25 14:13:51 +00001654
1655 if (getLexer().is(AsmToken::RParen)) {
1656 Parser.Lex();
1657 return false;
1658 }
1659
1660 // optional params
1661 if (getLexer().isNot(AsmToken::Comma))
1662 return true;
1663 Parser.Lex();
1664
1665 if (getLexer().isNot(AsmToken::Integer))
1666 return true;
1667 if (getParser().parseAbsoluteExpression(Offset))
1668 return true;
1669
1670 if (getLexer().isNot(AsmToken::Comma))
1671 return true;
1672 Parser.Lex();
1673
1674 if (getLexer().isNot(AsmToken::Integer))
1675 return true;
1676 if (getParser().parseAbsoluteExpression(Width))
1677 return true;
1678
1679 if (getLexer().isNot(AsmToken::RParen))
1680 return true;
1681 Parser.Lex();
1682
1683 return false;
1684}
1685
/// Parse the simm16 operand of s_setreg/s_getreg: either a raw 16-bit
/// integer or a hwreg(...) construct. Out-of-range fields produce a
/// diagnostic but the operand is still created so parsing can continue.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unnecessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        OperandInfoTy HwReg(ID_UNKNOWN_);
        int64_t Offset = OFFSET_DEFAULT_;
        int64_t Width = WIDTH_M1_DEFAULT_ + 1;
        if (parseHwregConstruct(HwReg, Offset, Width))
          return MatchOperand_ParseFail;
        // Validate each field, then pack id | offset | (width-1) into simm16.
        if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
          if (HwReg.IsSymbolic)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
1729
Tom Stellard45bb48e2015-06-13 03:28:10 +00001730bool AMDGPUOperand::isSWaitCnt() const {
1731 return isImm();
1732}
1733
Artem Tamazovd6468662016-04-25 14:13:51 +00001734bool AMDGPUOperand::isHwreg() const {
1735 return isImmTy(ImmTyHwreg);
1736}
1737
Sam Kolton5f10a132016-05-06 11:31:17 +00001738AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
1739 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
1740}
1741
Artem Tamazov6edc1352016-05-26 17:00:33 +00001742bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001743 using namespace llvm::AMDGPU::SendMsg;
1744
1745 if (Parser.getTok().getString() != "sendmsg")
1746 return true;
1747 Parser.Lex();
1748
1749 if (getLexer().isNot(AsmToken::LParen))
1750 return true;
1751 Parser.Lex();
1752
1753 if (getLexer().is(AsmToken::Identifier)) {
1754 Msg.IsSymbolic = true;
1755 Msg.Id = ID_UNKNOWN_;
1756 const std::string tok = Parser.getTok().getString();
1757 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
1758 switch(i) {
1759 default: continue; // Omit gaps.
1760 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
1761 }
1762 if (tok == IdSymbolic[i]) {
1763 Msg.Id = i;
1764 break;
1765 }
1766 }
1767 Parser.Lex();
1768 } else {
1769 Msg.IsSymbolic = false;
1770 if (getLexer().isNot(AsmToken::Integer))
1771 return true;
1772 if (getParser().parseAbsoluteExpression(Msg.Id))
1773 return true;
1774 if (getLexer().is(AsmToken::Integer))
1775 if (getParser().parseAbsoluteExpression(Msg.Id))
1776 Msg.Id = ID_UNKNOWN_;
1777 }
1778 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
1779 return false;
1780
1781 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
1782 if (getLexer().isNot(AsmToken::RParen))
1783 return true;
1784 Parser.Lex();
1785 return false;
1786 }
1787
1788 if (getLexer().isNot(AsmToken::Comma))
1789 return true;
1790 Parser.Lex();
1791
1792 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
1793 Operation.Id = ID_UNKNOWN_;
1794 if (getLexer().is(AsmToken::Identifier)) {
1795 Operation.IsSymbolic = true;
1796 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1797 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1798 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00001799 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001800 for (int i = F; i < L; ++i) {
1801 if (Tok == S[i]) {
1802 Operation.Id = i;
1803 break;
1804 }
1805 }
1806 Parser.Lex();
1807 } else {
1808 Operation.IsSymbolic = false;
1809 if (getLexer().isNot(AsmToken::Integer))
1810 return true;
1811 if (getParser().parseAbsoluteExpression(Operation.Id))
1812 return true;
1813 }
1814
1815 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1816 // Stream id is optional.
1817 if (getLexer().is(AsmToken::RParen)) {
1818 Parser.Lex();
1819 return false;
1820 }
1821
1822 if (getLexer().isNot(AsmToken::Comma))
1823 return true;
1824 Parser.Lex();
1825
1826 if (getLexer().isNot(AsmToken::Integer))
1827 return true;
1828 if (getParser().parseAbsoluteExpression(StreamId))
1829 return true;
1830 }
1831
1832 if (getLexer().isNot(AsmToken::RParen))
1833 return true;
1834 Parser.Lex();
1835 return false;
1836}
1837
/// Parse the simm16 operand of s_sendmsg: either a raw 16-bit integer or a
/// sendmsg(...) construct. Field validation emits diagnostics but the
/// operand is still created so parsing can continue.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      // do/while(0): each failed validation breaks out, leaving whatever has
      // been encoded so far in Imm16Val.
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (0);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
1916
// True iff this operand was parsed as a sendmsg(...) immediate.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
1920
Tom Stellard45bb48e2015-06-13 03:28:10 +00001921//===----------------------------------------------------------------------===//
1922// sopp branch targets
1923//===----------------------------------------------------------------------===//
1924
1925AMDGPUAsmParser::OperandMatchResultTy
1926AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1927 SMLoc S = Parser.getTok().getLoc();
1928
1929 switch (getLexer().getKind()) {
1930 default: return MatchOperand_ParseFail;
1931 case AsmToken::Integer: {
1932 int64_t Imm;
1933 if (getParser().parseAbsoluteExpression(Imm))
1934 return MatchOperand_ParseFail;
1935 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1936 return MatchOperand_Success;
1937 }
1938
1939 case AsmToken::Identifier:
1940 Operands.push_back(AMDGPUOperand::CreateExpr(
1941 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
1942 Parser.getTok().getString()), getContext()), S));
1943 Parser.Lex();
1944 return MatchOperand_Success;
1945 }
1946}
1947
1948//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00001949// mubuf
1950//===----------------------------------------------------------------------===//
1951
Tom Stellard45bb48e2015-06-13 03:28:10 +00001952bool AMDGPUOperand::isMubufOffset() const {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001953 return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
Tom Stellard45bb48e2015-06-13 03:28:10 +00001954}
1955
Sam Kolton5f10a132016-05-06 11:31:17 +00001956AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
1957 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
1958}
1959
// Default glc modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
1963
// Default slc modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
1967
// Default tfe modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
1971
Artem Tamazov8ce1f712016-05-19 12:22:39 +00001972void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
1973 const OperandVector &Operands,
1974 bool IsAtomic, bool IsAtomicReturn) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001975 OptionalImmIndexMap OptionalIdx;
Artem Tamazov8ce1f712016-05-19 12:22:39 +00001976 assert(IsAtomicReturn ? IsAtomic : true);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001977
1978 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1979 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1980
1981 // Add the register arguments
1982 if (Op.isReg()) {
1983 Op.addRegOperands(Inst, 1);
1984 continue;
1985 }
1986
1987 // Handle the case where soffset is an immediate
1988 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
1989 Op.addImmOperands(Inst, 1);
1990 continue;
1991 }
1992
1993 // Handle tokens like 'offen' which are sometimes hard-coded into the
1994 // asm string. There are no MCInst operands for these.
1995 if (Op.isToken()) {
1996 continue;
1997 }
1998 assert(Op.isImm());
1999
2000 // Handle optional arguments
2001 OptionalIdx[Op.getImmTy()] = i;
2002 }
2003
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002004 // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
2005 if (IsAtomicReturn) {
2006 MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
2007 Inst.insert(I, *I);
2008 }
2009
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002010 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov8ce1f712016-05-19 12:22:39 +00002011 if (!IsAtomic) { // glc is hard-coded.
2012 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2013 }
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002014 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2015 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002016}
2017
2018//===----------------------------------------------------------------------===//
2019// mimg
2020//===----------------------------------------------------------------------===//
2021
Sam Kolton1bdcef72016-05-23 09:59:02 +00002022void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
2023 unsigned I = 1;
2024 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2025 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2026 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2027 }
2028
2029 OptionalImmIndexMap OptionalIdx;
2030
2031 for (unsigned E = Operands.size(); I != E; ++I) {
2032 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2033
2034 // Add the register arguments
2035 if (Op.isRegOrImm()) {
2036 Op.addRegOrImmOperands(Inst, 1);
2037 continue;
2038 } else if (Op.isImmModifier()) {
2039 OptionalIdx[Op.getImmTy()] = I;
2040 } else {
2041 assert(false);
2042 }
2043 }
2044
2045 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2046 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2047 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2048 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2049 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2050 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2051 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2052 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2053}
2054
/// Like cvtMIMG, but for MIMG atomics: the first source register is the same
/// operand as the destination, so it is added twice.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  // Operands[0] is the mnemonic token, so start at 1.
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Remember optional modifiers; appended in encoding order below.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2090
Sam Kolton5f10a132016-05-06 11:31:17 +00002091AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
2092 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
2093}
2094
// Default unorm modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}
2098
// Default da modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
}
2102
// Default r128 modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
}
2106
// Default lwe modifier when none is written: bit clear.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
2110
Tom Stellard45bb48e2015-06-13 03:28:10 +00002111//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002112// smrd
2113//===----------------------------------------------------------------------===//
2114
// SMRD offsets are unsigned 8-bit immediates.
bool AMDGPUOperand::isSMRDOffset() const {

  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}
2121
// SMRD 32-bit literal offset: any 32-bit value that does not fit in 8 bits.
bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
2127
Sam Kolton5f10a132016-05-06 11:31:17 +00002128AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
2129 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2130}
2131
// Default SMRD literal offset when none is written: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2135
Tom Stellard217361c2015-08-06 19:28:38 +00002136//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002137// vop3
2138//===----------------------------------------------------------------------===//
2139
// Map a written omod multiplier (mul:1|2|4) onto its encoded value
// (0, 1, 2 respectively). Returns false, leaving Mul untouched, for any
// other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2.
    return true;
  default:
    return false;
  }
}
2147
// Map a written omod divisor (div:1|2) onto its encoded value
// (0 and 3 respectively). Returns false, leaving Div untouched, for any
// other divisor.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2161
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002162static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
2163 if (BoundCtrl == 0) {
2164 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002165 return true;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002166 } else if (BoundCtrl == -1) {
2167 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002168 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002169 }
2170 return false;
2171}
2172
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002173// Note: the order in this table matches the order of operands in AsmString.
Sam Kolton11de3702016-05-24 12:38:33 +00002174static const OptionalOperand AMDGPUOptionalOperandTable[] = {
2175 {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
2176 {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
2177 {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
2178 {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
2179 {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
2180 {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
2181 {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
2182 {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
2183 {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
2184 {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
2185 {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
2186 {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
2187 {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
2188 {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
2189 {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
2190 {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
2191 {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
2192 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
2193 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
2194 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
2195 {"sdwa_sel", AMDGPUOperand::ImmTySdwaSel, false, nullptr},
2196 {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002197};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002198
Sam Kolton11de3702016-05-24 12:38:33 +00002199AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
2200 OperandMatchResultTy res;
2201 for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
2202 // try to parse any optional operand here
2203 if (Op.IsBit) {
2204 res = parseNamedBit(Op.Name, Operands, Op.Type);
2205 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2206 res = parseOModOperand(Operands);
2207 } else if (Op.Type == AMDGPUOperand::ImmTySdwaSel) {
2208 res = parseSDWASel(Operands);
2209 } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
2210 res = parseSDWADstUnused(Operands);
2211 } else {
2212 res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
2213 }
2214 if (res != MatchOperand_NoMatch) {
2215 return res;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002216 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002217 }
2218 return MatchOperand_NoMatch;
2219}
2220
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002221AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2222{
2223 StringRef Name = Parser.getTok().getString();
2224 if (Name == "mul") {
Sam Kolton11de3702016-05-24 12:38:33 +00002225 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002226 } else if (Name == "div") {
Sam Kolton11de3702016-05-24 12:38:33 +00002227 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002228 } else {
2229 return MatchOperand_NoMatch;
2230 }
2231}
2232
Sam Kolton5f10a132016-05-06 11:31:17 +00002233AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
2234 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
2235}
2236
2237AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
2238 return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
2239}
2240
Tom Stellarda90b9522016-02-11 03:28:15 +00002241void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2242 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002243 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002244 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002245 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2246 }
2247 for (unsigned E = Operands.size(); I != E; ++I)
2248 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2249}
2250
2251void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002252 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2253 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002254 cvtVOP3(Inst, Operands);
2255 } else {
2256 cvtId(Inst, Operands);
2257 }
2258}
2259
Tom Stellarda90b9522016-02-11 03:28:15 +00002260void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002261 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00002262 unsigned I = 1;
2263 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002264 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002265 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00002266 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002267
Tom Stellarda90b9522016-02-11 03:28:15 +00002268 for (unsigned E = Operands.size(); I != E; ++I) {
2269 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00002270 if (Op.isRegOrImmWithInputMods()) {
2271 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002272 } else if (Op.isImm()) {
2273 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00002274 } else {
2275 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002276 }
Tom Stellarda90b9522016-02-11 03:28:15 +00002277 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002278
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002279 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2280 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002281}
2282
Sam Koltondfa29f72016-03-09 12:29:31 +00002283//===----------------------------------------------------------------------===//
2284// dpp
2285//===----------------------------------------------------------------------===//
2286
2287bool AMDGPUOperand::isDPPCtrl() const {
2288 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2289 if (result) {
2290 int64_t Imm = getImm();
2291 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2292 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2293 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2294 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2295 (Imm == 0x130) ||
2296 (Imm == 0x134) ||
2297 (Imm == 0x138) ||
2298 (Imm == 0x13c) ||
2299 (Imm == 0x140) ||
2300 (Imm == 0x141) ||
2301 (Imm == 0x142) ||
2302 (Imm == 0x143);
2303 }
2304 return false;
2305}
2306
Sam Koltona74cd522016-03-18 15:35:51 +00002307AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002308AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002309 SMLoc S = Parser.getTok().getLoc();
2310 StringRef Prefix;
2311 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00002312
Sam Koltona74cd522016-03-18 15:35:51 +00002313 if (getLexer().getKind() == AsmToken::Identifier) {
2314 Prefix = Parser.getTok().getString();
2315 } else {
2316 return MatchOperand_NoMatch;
2317 }
2318
2319 if (Prefix == "row_mirror") {
2320 Int = 0x140;
2321 } else if (Prefix == "row_half_mirror") {
2322 Int = 0x141;
2323 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002324 // Check to prevent parseDPPCtrlOps from eating invalid tokens
2325 if (Prefix != "quad_perm"
2326 && Prefix != "row_shl"
2327 && Prefix != "row_shr"
2328 && Prefix != "row_ror"
2329 && Prefix != "wave_shl"
2330 && Prefix != "wave_rol"
2331 && Prefix != "wave_shr"
2332 && Prefix != "wave_ror"
2333 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00002334 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00002335 }
2336
Sam Koltona74cd522016-03-18 15:35:51 +00002337 Parser.Lex();
2338 if (getLexer().isNot(AsmToken::Colon))
2339 return MatchOperand_ParseFail;
2340
2341 if (Prefix == "quad_perm") {
2342 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00002343 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00002344 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00002345 return MatchOperand_ParseFail;
2346
2347 Parser.Lex();
2348 if (getLexer().isNot(AsmToken::Integer))
2349 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002350 Int = getLexer().getTok().getIntVal();
Sam Koltondfa29f72016-03-09 12:29:31 +00002351
Sam Koltona74cd522016-03-18 15:35:51 +00002352 Parser.Lex();
2353 if (getLexer().isNot(AsmToken::Comma))
Sam Koltondfa29f72016-03-09 12:29:31 +00002354 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002355 Parser.Lex();
2356 if (getLexer().isNot(AsmToken::Integer))
2357 return MatchOperand_ParseFail;
2358 Int += (getLexer().getTok().getIntVal() << 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00002359
Sam Koltona74cd522016-03-18 15:35:51 +00002360 Parser.Lex();
2361 if (getLexer().isNot(AsmToken::Comma))
2362 return MatchOperand_ParseFail;
2363 Parser.Lex();
2364 if (getLexer().isNot(AsmToken::Integer))
2365 return MatchOperand_ParseFail;
2366 Int += (getLexer().getTok().getIntVal() << 4);
2367
2368 Parser.Lex();
2369 if (getLexer().isNot(AsmToken::Comma))
2370 return MatchOperand_ParseFail;
2371 Parser.Lex();
2372 if (getLexer().isNot(AsmToken::Integer))
2373 return MatchOperand_ParseFail;
2374 Int += (getLexer().getTok().getIntVal() << 6);
2375
2376 Parser.Lex();
2377 if (getLexer().isNot(AsmToken::RBrac))
2378 return MatchOperand_ParseFail;
2379
2380 } else {
2381 // sel:%d
2382 Parser.Lex();
2383 if (getLexer().isNot(AsmToken::Integer))
2384 return MatchOperand_ParseFail;
2385 Int = getLexer().getTok().getIntVal();
2386
2387 if (Prefix == "row_shl") {
2388 Int |= 0x100;
2389 } else if (Prefix == "row_shr") {
2390 Int |= 0x110;
2391 } else if (Prefix == "row_ror") {
2392 Int |= 0x120;
2393 } else if (Prefix == "wave_shl") {
2394 Int = 0x130;
2395 } else if (Prefix == "wave_rol") {
2396 Int = 0x134;
2397 } else if (Prefix == "wave_shr") {
2398 Int = 0x138;
2399 } else if (Prefix == "wave_ror") {
2400 Int = 0x13C;
2401 } else if (Prefix == "row_bcast") {
2402 if (Int == 15) {
2403 Int = 0x142;
2404 } else if (Int == 31) {
2405 Int = 0x143;
2406 }
2407 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002408 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002409 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002410 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002411 }
Sam Koltona74cd522016-03-18 15:35:51 +00002412 Parser.Lex(); // eat last token
2413
2414 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
Sam Koltondfa29f72016-03-09 12:29:31 +00002415 AMDGPUOperand::ImmTyDppCtrl));
2416 return MatchOperand_Success;
2417}
2418
Sam Kolton5f10a132016-05-06 11:31:17 +00002419AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2420 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002421}
2422
Sam Kolton5f10a132016-05-06 11:31:17 +00002423AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2424 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002425}
2426
Sam Kolton5f10a132016-05-06 11:31:17 +00002427AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2428 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2429}
2430
2431void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002432 OptionalImmIndexMap OptionalIdx;
2433
2434 unsigned I = 1;
2435 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2436 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2437 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2438 }
2439
2440 for (unsigned E = Operands.size(); I != E; ++I) {
2441 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2442 // Add the register arguments
Sam Kolton5f10a132016-05-06 11:31:17 +00002443 if (Op.isRegOrImmWithInputMods()) {
2444 // We convert only instructions with modifiers
Sam Koltondfa29f72016-03-09 12:29:31 +00002445 Op.addRegOrImmWithInputModsOperands(Inst, 2);
2446 } else if (Op.isDPPCtrl()) {
2447 Op.addImmOperands(Inst, 1);
2448 } else if (Op.isImm()) {
2449 // Handle optional arguments
2450 OptionalIdx[Op.getImmTy()] = I;
2451 } else {
2452 llvm_unreachable("Invalid operand type");
2453 }
2454 }
2455
2456 // ToDo: fix default values for row_mask and bank_mask
2457 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2458 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2459 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2460}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002461
Sam Kolton3025e7f2016-04-26 13:33:56 +00002462//===----------------------------------------------------------------------===//
2463// sdwa
2464//===----------------------------------------------------------------------===//
2465
2466AMDGPUAsmParser::OperandMatchResultTy
2467AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2468 SMLoc S = Parser.getTok().getLoc();
2469 StringRef Value;
2470 AMDGPUAsmParser::OperandMatchResultTy res;
2471
2472 res = parseStringWithPrefix("dst_sel", Value);
2473 if (res == MatchOperand_ParseFail) {
2474 return MatchOperand_ParseFail;
2475 } else if (res == MatchOperand_NoMatch) {
2476 res = parseStringWithPrefix("src0_sel", Value);
2477 if (res == MatchOperand_ParseFail) {
2478 return MatchOperand_ParseFail;
2479 } else if (res == MatchOperand_NoMatch) {
2480 res = parseStringWithPrefix("src1_sel", Value);
2481 if (res != MatchOperand_Success) {
2482 return res;
2483 }
2484 }
2485 }
2486
2487 int64_t Int;
2488 Int = StringSwitch<int64_t>(Value)
2489 .Case("BYTE_0", 0)
2490 .Case("BYTE_1", 1)
2491 .Case("BYTE_2", 2)
2492 .Case("BYTE_3", 3)
2493 .Case("WORD_0", 4)
2494 .Case("WORD_1", 5)
2495 .Case("DWORD", 6)
2496 .Default(0xffffffff);
2497 Parser.Lex(); // eat last token
2498
2499 if (Int == 0xffffffff) {
2500 return MatchOperand_ParseFail;
2501 }
2502
2503 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2504 AMDGPUOperand::ImmTySdwaSel));
2505 return MatchOperand_Success;
2506}
2507
2508AMDGPUAsmParser::OperandMatchResultTy
2509AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2510 SMLoc S = Parser.getTok().getLoc();
2511 StringRef Value;
2512 AMDGPUAsmParser::OperandMatchResultTy res;
2513
2514 res = parseStringWithPrefix("dst_unused", Value);
2515 if (res != MatchOperand_Success) {
2516 return res;
2517 }
2518
2519 int64_t Int;
2520 Int = StringSwitch<int64_t>(Value)
2521 .Case("UNUSED_PAD", 0)
2522 .Case("UNUSED_SEXT", 1)
2523 .Case("UNUSED_PRESERVE", 2)
2524 .Default(0xffffffff);
2525 Parser.Lex(); // eat last token
2526
2527 if (Int == 0xffffffff) {
2528 return MatchOperand_ParseFail;
2529 }
2530
2531 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2532 AMDGPUOperand::ImmTySdwaDstUnused));
2533 return MatchOperand_Success;
2534}
2535
Sam Kolton5f10a132016-05-06 11:31:17 +00002536AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
2537 return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
2538}
2539
2540AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
2541 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
2542}
2543
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002544
Tom Stellard45bb48e2015-06-13 03:28:10 +00002545/// Force static initialization.
2546extern "C" void LLVMInitializeAMDGPUAsmParser() {
2547 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
2548 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
2549}
2550
2551#define GET_REGISTER_MATCHER
2552#define GET_MATCHER_IMPLEMENTATION
2553#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00002554
2555
2556// This fuction should be defined after auto-generated include so that we have
2557// MatchClassKind enum defined
2558unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
2559 unsigned Kind) {
2560 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
2561 // But MatchInstructionImpl() expects to meet token and fails to validate
2562 // operand. This method checks if we are given immediate operand but expect to
2563 // get corresponding token.
2564 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
2565 switch (Kind) {
2566 case MCK_addr64:
2567 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
2568 case MCK_gds:
2569 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
2570 case MCK_glc:
2571 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
2572 case MCK_idxen:
2573 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
2574 case MCK_offen:
2575 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
2576 default: return Match_InvalidOperand;
2577 }
2578}