blob: 6c7dee460236c2bb46e903df53deb65beaf00997 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000016#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000018#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/StringSwitch.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCParser/MCAsmLexer.h"
27#include "llvm/MC/MCParser/MCAsmParser.h"
28#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000029#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000033#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000034#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/Support/SourceMgr.h"
37#include "llvm/Support/TargetRegistry.h"
38#include "llvm/Support/raw_ostream.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039
// FIXME ODR: Move this to some common place for AsmParser and InstPrinter
namespace llvm {
namespace AMDGPU {
namespace SendMsg {

// Textual names of the s_sendmsg message IDs, indexed by encoding value.
// Unnamed/reserved encodings are nullptr.
// This must be in sync with llvm::AMDGPU::SendMsg::Id enum members.
static const char* const IdSymbolic[] = {
  nullptr,          //  0 (reserved)
  "MSG_INTERRUPT",  //  1
  "MSG_GS",         //  2
  "MSG_GS_DONE",    //  3
  nullptr,          //  4 (reserved)
  nullptr,          //  5 (reserved)
  nullptr,          //  6 (reserved)
  nullptr,          //  7 (reserved)
  nullptr,          //  8 (reserved)
  nullptr,          //  9 (reserved)
  nullptr,          // 10 (reserved)
  nullptr,          // 11 (reserved)
  nullptr,          // 12 (reserved)
  nullptr,          // 13 (reserved)
  nullptr,          // 14 (reserved)
  "MSG_SYSMSG"      // 15
};

// Textual names of the SYSMSG and GS message operations, indexed by encoding.
// These two must be in sync with llvm::AMDGPU::SendMsg::Op enum members.
static const char* const OpSysSymbolic[] = {
  nullptr,                        // 0 (reserved)
  "SYSMSG_OP_ECC_ERR_INTERRUPT",  // 1
  "SYSMSG_OP_REG_RD",             // 2
  "SYSMSG_OP_HOST_TRAP_ACK",      // 3
  "SYSMSG_OP_TTRACE_PC"           // 4
};

static const char* const OpGsSymbolic[] = {
  "GS_OP_NOP",      // 0
  "GS_OP_CUT",      // 1
  "GS_OP_EMIT",     // 2
  "GS_OP_EMIT_CUT"  // 3
};

} // namespace SendMsg
} // namespace AMDGPU
} // namespace llvm
87
Tom Stellard45bb48e2015-06-13 03:28:10 +000088using namespace llvm;
89
90namespace {
91
92struct OptionalOperand;
93
// Classification of a parsed register name: vector GPR, scalar GPR,
// trap-temporary (TTMP), or a named special register (vcc, exec, m0, ...).
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
95
Tom Stellard45bb48e2015-06-13 03:28:10 +000096class AMDGPUOperand : public MCParsedAsmOperand {
97 enum KindTy {
98 Token,
99 Immediate,
100 Register,
101 Expression
102 } Kind;
103
104 SMLoc StartLoc, EndLoc;
105
106public:
107 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
108
109 MCContext *Ctx;
110
Sam Kolton5f10a132016-05-06 11:31:17 +0000111 typedef std::unique_ptr<AMDGPUOperand> Ptr;
112
Tom Stellard45bb48e2015-06-13 03:28:10 +0000113 enum ImmTy {
114 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000115 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000116 ImmTyOffen,
117 ImmTyIdxen,
118 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000119 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000120 ImmTyOffset0,
121 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000122 ImmTyGLC,
123 ImmTySLC,
124 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000125 ImmTyClampSI,
126 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000127 ImmTyDppCtrl,
128 ImmTyDppRowMask,
129 ImmTyDppBankMask,
130 ImmTyDppBoundCtrl,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000131 ImmTySdwaSel,
132 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000133 ImmTyDMask,
134 ImmTyUNorm,
135 ImmTyDA,
136 ImmTyR128,
137 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +0000138 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000139 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000140 };
141
142 struct TokOp {
143 const char *Data;
144 unsigned Length;
145 };
146
147 struct ImmOp {
148 bool IsFPImm;
149 ImmTy Type;
150 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000151 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000152 };
153
154 struct RegOp {
155 unsigned RegNo;
156 int Modifiers;
157 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000158 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000159 bool IsForcedVOP3;
160 };
161
162 union {
163 TokOp Tok;
164 ImmOp Imm;
165 RegOp Reg;
166 const MCExpr *Expr;
167 };
168
Sam Kolton1bdcef72016-05-23 09:59:02 +0000169 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
170 if (Imm.Type == ImmTyNone && ApplyModifiers && Imm.Modifiers != 0) {
171 // Apply modifiers to immediate value
172 int64_t Val = Imm.Val;
173 bool Negate = Imm.Modifiers & 0x1;
174 bool Abs = Imm.Modifiers & 0x2;
175 if (Imm.IsFPImm) {
176 APFloat F(BitsToFloat(Val));
177 if (Abs) {
178 F.clearSign();
179 }
180 if (Negate) {
181 F.changeSign();
182 }
183 Val = F.bitcastToAPInt().getZExtValue();
184 } else {
185 Val = Abs ? std::abs(Val) : Val;
186 Val = Negate ? -Val : Val;
187 }
188 Inst.addOperand(MCOperand::createImm(Val));
189 } else {
190 Inst.addOperand(MCOperand::createImm(getImm()));
191 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000192 }
193
194 StringRef getToken() const {
195 return StringRef(Tok.Data, Tok.Length);
196 }
197
198 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000199 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000200 }
201
202 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000203 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000204 addRegOperands(Inst, N);
205 else
206 addImmOperands(Inst, N);
207 }
208
Tom Stellardd93a34f2016-02-22 19:17:56 +0000209 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
210 if (isRegKind()) {
211 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
212 addRegOperands(Inst, N);
213 } else {
214 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
Sam Kolton1bdcef72016-05-23 09:59:02 +0000215 addImmOperands(Inst, N, false);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000216 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000217 }
218
219 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
220 if (isImm())
221 addImmOperands(Inst, N);
222 else {
223 assert(isExpr());
224 Inst.addOperand(MCOperand::createExpr(Expr));
225 }
226 }
227
Tom Stellard45bb48e2015-06-13 03:28:10 +0000228 bool isToken() const override {
229 return Kind == Token;
230 }
231
232 bool isImm() const override {
233 return Kind == Immediate;
234 }
235
Tom Stellardd93a34f2016-02-22 19:17:56 +0000236 bool isInlinableImm() const {
237 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
238 immediates are inlinable (e.g. "clamp" attribute is not) */ )
239 return false;
240 // TODO: We should avoid using host float here. It would be better to
Sam Koltona74cd522016-03-18 15:35:51 +0000241 // check the float bit values which is what a few other places do.
Tom Stellardd93a34f2016-02-22 19:17:56 +0000242 // We've had bot failures before due to weird NaN support on mips hosts.
243 const float F = BitsToFloat(Imm.Val);
244 // TODO: Add 1/(2*pi) for VI
245 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000246 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000247 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000248 }
249
Tom Stellard45bb48e2015-06-13 03:28:10 +0000250 int64_t getImm() const {
251 return Imm.Val;
252 }
253
254 enum ImmTy getImmTy() const {
255 assert(isImm());
256 return Imm.Type;
257 }
258
259 bool isRegKind() const {
260 return Kind == Register;
261 }
262
263 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000264 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000265 }
266
Tom Stellardd93a34f2016-02-22 19:17:56 +0000267 bool isRegOrImmWithInputMods() const {
268 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000269 }
270
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000271 bool isImmTy(ImmTy ImmT) const {
272 return isImm() && Imm.Type == ImmT;
273 }
274
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000275 bool isClampSI() const {
276 return isImmTy(ImmTyClampSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000277 }
278
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000279 bool isOModSI() const {
280 return isImmTy(ImmTyOModSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000281 }
282
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000283 bool isImmModifier() const {
284 return Kind == Immediate && Imm.Type != ImmTyNone;
285 }
286
287 bool isDMask() const {
288 return isImmTy(ImmTyDMask);
289 }
290
291 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
292 bool isDA() const { return isImmTy(ImmTyDA); }
293 bool isR128() const { return isImmTy(ImmTyUNorm); }
294 bool isLWE() const { return isImmTy(ImmTyLWE); }
295
Tom Stellarda90b9522016-02-11 03:28:15 +0000296 bool isMod() const {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000297 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000298 }
299
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000300 bool isOffen() const { return isImmTy(ImmTyOffen); }
301 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
302 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
303 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
304 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
305 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000306 bool isGDS() const { return isImmTy(ImmTyGDS); }
307 bool isGLC() const { return isImmTy(ImmTyGLC); }
308 bool isSLC() const { return isImmTy(ImmTySLC); }
309 bool isTFE() const { return isImmTy(ImmTyTFE); }
310
Sam Koltondfa29f72016-03-09 12:29:31 +0000311 bool isBankMask() const {
312 return isImmTy(ImmTyDppBankMask);
313 }
314
315 bool isRowMask() const {
316 return isImmTy(ImmTyDppRowMask);
317 }
318
319 bool isBoundCtrl() const {
320 return isImmTy(ImmTyDppBoundCtrl);
321 }
Sam Koltona74cd522016-03-18 15:35:51 +0000322
Sam Kolton3025e7f2016-04-26 13:33:56 +0000323 bool isSDWASel() const {
324 return isImmTy(ImmTySdwaSel);
325 }
326
327 bool isSDWADstUnused() const {
328 return isImmTy(ImmTySdwaDstUnused);
329 }
330
Tom Stellard45bb48e2015-06-13 03:28:10 +0000331 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000332 assert(isReg() || (isImm() && Imm.Modifiers == 0));
333 if (isReg())
334 Reg.Modifiers = Mods;
335 else
336 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000337 }
338
339 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000340 assert(isRegKind() || isImm());
341 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000342 }
343
344 unsigned getReg() const override {
345 return Reg.RegNo;
346 }
347
348 bool isRegOrImm() const {
349 return isReg() || isImm();
350 }
351
352 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000353 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000354 }
355
356 bool isSCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000357 return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000358 }
359
Matt Arsenault86d336e2015-09-08 21:15:00 +0000360 bool isSCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000361 return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000362 }
363
364 bool isSSrc32() const {
365 return isImm() || isSCSrc32();
366 }
367
368 bool isSSrc64() const {
369 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
370 // See isVSrc64().
371 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000372 }
373
Tom Stellard45bb48e2015-06-13 03:28:10 +0000374 bool isVCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000375 return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000376 }
377
378 bool isVCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000379 return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000380 }
381
382 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000383 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000384 }
385
386 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000387 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000388 // narrowed to 32 bits (in the instruction stream). That require knowledge
389 // of instruction type (unsigned/signed, floating or "untyped"/B64),
390 // see [AMD GCN3 ISA 6.3.1].
391 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
392 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000393 }
394
395 bool isMem() const override {
396 return false;
397 }
398
399 bool isExpr() const {
400 return Kind == Expression;
401 }
402
403 bool isSoppBrTarget() const {
404 return isExpr() || isImm();
405 }
406
407 SMLoc getStartLoc() const override {
408 return StartLoc;
409 }
410
411 SMLoc getEndLoc() const override {
412 return EndLoc;
413 }
414
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000415 void printImmTy(raw_ostream& OS, ImmTy Type) const {
416 switch (Type) {
417 case ImmTyNone: OS << "None"; break;
418 case ImmTyGDS: OS << "GDS"; break;
419 case ImmTyOffen: OS << "Offen"; break;
420 case ImmTyIdxen: OS << "Idxen"; break;
421 case ImmTyAddr64: OS << "Addr64"; break;
422 case ImmTyOffset: OS << "Offset"; break;
423 case ImmTyOffset0: OS << "Offset0"; break;
424 case ImmTyOffset1: OS << "Offset1"; break;
425 case ImmTyGLC: OS << "GLC"; break;
426 case ImmTySLC: OS << "SLC"; break;
427 case ImmTyTFE: OS << "TFE"; break;
428 case ImmTyClampSI: OS << "ClampSI"; break;
429 case ImmTyOModSI: OS << "OModSI"; break;
430 case ImmTyDppCtrl: OS << "DppCtrl"; break;
431 case ImmTyDppRowMask: OS << "DppRowMask"; break;
432 case ImmTyDppBankMask: OS << "DppBankMask"; break;
433 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
434 case ImmTySdwaSel: OS << "SdwaSel"; break;
435 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
436 case ImmTyDMask: OS << "DMask"; break;
437 case ImmTyUNorm: OS << "UNorm"; break;
438 case ImmTyDA: OS << "DA"; break;
439 case ImmTyR128: OS << "R128"; break;
440 case ImmTyLWE: OS << "LWE"; break;
441 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000442 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000443 }
444 }
445
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000446 void print(raw_ostream &OS) const override {
447 switch (Kind) {
448 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000449 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000450 break;
451 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000452 OS << '<' << getImm();
453 if (getImmTy() != ImmTyNone) {
454 OS << " type: "; printImmTy(OS, getImmTy());
455 }
456 OS << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000457 break;
458 case Token:
459 OS << '\'' << getToken() << '\'';
460 break;
461 case Expression:
462 OS << "<expr " << *Expr << '>';
463 break;
464 }
465 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000466
Sam Kolton5f10a132016-05-06 11:31:17 +0000467 static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
468 enum ImmTy Type = ImmTyNone,
469 bool IsFPImm = false) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000470 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
471 Op->Imm.Val = Val;
472 Op->Imm.IsFPImm = IsFPImm;
473 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000474 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000475 Op->StartLoc = Loc;
476 Op->EndLoc = Loc;
477 return Op;
478 }
479
Sam Kolton5f10a132016-05-06 11:31:17 +0000480 static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
481 bool HasExplicitEncodingSize = true) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000482 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
483 Res->Tok.Data = Str.data();
484 Res->Tok.Length = Str.size();
485 Res->StartLoc = Loc;
486 Res->EndLoc = Loc;
487 return Res;
488 }
489
Sam Kolton5f10a132016-05-06 11:31:17 +0000490 static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
491 SMLoc E,
492 const MCRegisterInfo *TRI,
493 const MCSubtargetInfo *STI,
494 bool ForceVOP3) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000495 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
496 Op->Reg.RegNo = RegNo;
497 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000498 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000499 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000500 Op->Reg.IsForcedVOP3 = ForceVOP3;
501 Op->StartLoc = S;
502 Op->EndLoc = E;
503 return Op;
504 }
505
Sam Kolton5f10a132016-05-06 11:31:17 +0000506 static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000507 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
508 Op->Expr = Expr;
509 Op->StartLoc = S;
510 Op->EndLoc = S;
511 return Op;
512 }
513
Tom Stellard45bb48e2015-06-13 03:28:10 +0000514 bool isSWaitCnt() const;
Artem Tamazovd6468662016-04-25 14:13:51 +0000515 bool isHwreg() const;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000516 bool isSendMsg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000517 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000518 bool isSMRDOffset() const;
519 bool isSMRDLiteralOffset() const;
Sam Koltondfa29f72016-03-09 12:29:31 +0000520 bool isDPPCtrl() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000521};
522
523class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000524 const MCInstrInfo &MII;
525 MCAsmParser &Parser;
526
527 unsigned ForcedEncodingSize;
Matt Arsenault68802d32015-11-05 03:11:27 +0000528
Matt Arsenault3b159672015-12-01 20:31:08 +0000529 bool isSI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000530 return AMDGPU::isSI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000531 }
532
533 bool isCI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000534 return AMDGPU::isCI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000535 }
536
Matt Arsenault68802d32015-11-05 03:11:27 +0000537 bool isVI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000538 return AMDGPU::isVI(getSTI());
Matt Arsenault68802d32015-11-05 03:11:27 +0000539 }
540
541 bool hasSGPR102_SGPR103() const {
542 return !isVI();
543 }
544
Tom Stellard45bb48e2015-06-13 03:28:10 +0000545 /// @name Auto-generated Match Functions
546 /// {
547
548#define GET_ASSEMBLER_HEADER
549#include "AMDGPUGenAsmMatcher.inc"
550
551 /// }
552
Tom Stellard347ac792015-06-26 21:15:07 +0000553private:
554 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
555 bool ParseDirectiveHSACodeObjectVersion();
556 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000557 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
558 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000559 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000560 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000561 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000562 bool ParseDirectiveAMDGPUHsaModuleGlobal();
563 bool ParseDirectiveAMDGPUHsaProgramGlobal();
564 bool ParseSectionDirectiveHSADataGlobalAgent();
565 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000566 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000567 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
568 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000569 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000570
Tom Stellard45bb48e2015-06-13 03:28:10 +0000571public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000572 enum AMDGPUMatchResultTy {
573 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
574 };
575
Akira Hatanakab11ef082015-11-14 06:35:56 +0000576 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000577 const MCInstrInfo &MII,
578 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000579 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Matt Arsenault68802d32015-11-05 03:11:27 +0000580 ForcedEncodingSize(0) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000581 MCAsmParserExtension::Initialize(Parser);
582
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000583 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000584 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000585 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000586 }
587
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000588 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000589 }
590
Tom Stellard347ac792015-06-26 21:15:07 +0000591 AMDGPUTargetStreamer &getTargetStreamer() {
592 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
593 return static_cast<AMDGPUTargetStreamer &>(TS);
594 }
595
Tom Stellard45bb48e2015-06-13 03:28:10 +0000596 unsigned getForcedEncodingSize() const {
597 return ForcedEncodingSize;
598 }
599
600 void setForcedEncodingSize(unsigned Size) {
601 ForcedEncodingSize = Size;
602 }
603
604 bool isForcedVOP3() const {
605 return ForcedEncodingSize == 64;
606 }
607
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000608 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000609 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
610 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
Sam Kolton11de3702016-05-24 12:38:33 +0000611 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
612 unsigned Kind) override;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000613 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
614 OperandVector &Operands, MCStreamer &Out,
615 uint64_t &ErrorInfo,
616 bool MatchingInlineAsm) override;
617 bool ParseDirective(AsmToken DirectiveID) override;
618 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
619 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
620 SMLoc NameLoc, OperandVector &Operands) override;
621
Sam Kolton11de3702016-05-24 12:38:33 +0000622 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000623 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
624 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000625 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000626 bool (*ConvertResult)(int64_t&) = 0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000627 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
Sam Kolton11de3702016-05-24 12:38:33 +0000628 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000629 OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000630
Sam Kolton1bdcef72016-05-23 09:59:02 +0000631 OperandMatchResultTy parseImm(OperandVector &Operands);
632 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
633 OperandMatchResultTy parseRegOrImmWithInputMods(OperandVector &Operands);
634
Tom Stellard45bb48e2015-06-13 03:28:10 +0000635 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
636 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000637
638 bool parseCnt(int64_t &IntVal);
639 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000640 bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
641 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000642
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000643private:
644 struct OperandInfoTy {
645 int64_t Id;
646 bool IsSymbolic;
647 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
648 };
Sam Kolton11de3702016-05-24 12:38:33 +0000649
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000650 bool parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
Sam Kolton11de3702016-05-24 12:38:33 +0000651
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000652public:
Sam Kolton11de3702016-05-24 12:38:33 +0000653 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
654
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000655 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000656 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000657 AMDGPUOperand::Ptr defaultHwreg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000658
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000659 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
660 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
661 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000662 AMDGPUOperand::Ptr defaultMubufOffset() const;
663 AMDGPUOperand::Ptr defaultGLC() const;
664 AMDGPUOperand::Ptr defaultSLC() const;
665 AMDGPUOperand::Ptr defaultTFE() const;
666
Sam Kolton5f10a132016-05-06 11:31:17 +0000667 AMDGPUOperand::Ptr defaultDMask() const;
668 AMDGPUOperand::Ptr defaultUNorm() const;
669 AMDGPUOperand::Ptr defaultDA() const;
670 AMDGPUOperand::Ptr defaultR128() const;
671 AMDGPUOperand::Ptr defaultLWE() const;
672 AMDGPUOperand::Ptr defaultSMRDOffset() const;
673 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
674
675 AMDGPUOperand::Ptr defaultClampSI() const;
676 AMDGPUOperand::Ptr defaultOModSI() const;
677
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000678 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
679
Tom Stellarda90b9522016-02-11 03:28:15 +0000680 void cvtId(MCInst &Inst, const OperandVector &Operands);
681 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000682 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000683
684 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000685 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000686
Sam Kolton11de3702016-05-24 12:38:33 +0000687 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000688 AMDGPUOperand::Ptr defaultRowMask() const;
689 AMDGPUOperand::Ptr defaultBankMask() const;
690 AMDGPUOperand::Ptr defaultBoundCtrl() const;
691 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000692
693 OperandMatchResultTy parseSDWASel(OperandVector &Operands);
694 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000695 AMDGPUOperand::Ptr defaultSDWASel() const;
696 AMDGPUOperand::Ptr defaultSDWADstUnused() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000697};
698
// Table entry describing an optional named operand: its source-text keyword,
// the ImmTy it produces, whether it is a bare flag (no ":value" part), and an
// optional hook to post-process the parsed integer value.
struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};
705
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000706}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000707
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000708static int getRegClass(RegisterKind Is, unsigned RegWidth) {
709 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000710 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000711 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000712 case 1: return AMDGPU::VGPR_32RegClassID;
713 case 2: return AMDGPU::VReg_64RegClassID;
714 case 3: return AMDGPU::VReg_96RegClassID;
715 case 4: return AMDGPU::VReg_128RegClassID;
716 case 8: return AMDGPU::VReg_256RegClassID;
717 case 16: return AMDGPU::VReg_512RegClassID;
718 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000719 } else if (Is == IS_TTMP) {
720 switch (RegWidth) {
721 default: return -1;
722 case 1: return AMDGPU::TTMP_32RegClassID;
723 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000724 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000725 }
726 } else if (Is == IS_SGPR) {
727 switch (RegWidth) {
728 default: return -1;
729 case 1: return AMDGPU::SGPR_32RegClassID;
730 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000731 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000732 case 8: return AMDGPU::SReg_256RegClassID;
733 case 16: return AMDGPU::SReg_512RegClassID;
734 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000735 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000736 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000737}
738
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000739static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000740 return StringSwitch<unsigned>(RegName)
741 .Case("exec", AMDGPU::EXEC)
742 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000743 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000744 .Case("m0", AMDGPU::M0)
745 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000746 .Case("tba", AMDGPU::TBA)
747 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000748 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
749 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000750 .Case("vcc_lo", AMDGPU::VCC_LO)
751 .Case("vcc_hi", AMDGPU::VCC_HI)
752 .Case("exec_lo", AMDGPU::EXEC_LO)
753 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000754 .Case("tma_lo", AMDGPU::TMA_LO)
755 .Case("tma_hi", AMDGPU::TMA_HI)
756 .Case("tba_lo", AMDGPU::TBA_LO)
757 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000758 .Default(0);
759}
760
761bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000762 auto R = parseRegister();
763 if (!R) return true;
764 assert(R->isReg());
765 RegNo = R->getReg();
766 StartLoc = R->getStartLoc();
767 EndLoc = R->getEndLoc();
768 return false;
769}
770
// Try to fold register Reg1 into the run described by (RegKind, Reg,
// RegWidth) while parsing a bracketed register list.  For special registers
// a LO/HI pair fuses into its 64-bit super-register; for numbered registers
// Reg1 must be the next consecutive register.  Returns false when Reg1 does
// not extend the run.  NOTE(review): RegNum is currently unused here.
bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
{
  switch (RegKind) {
  case IS_SPECIAL:
    // Only these LO/HI pairs combine; any other special pair is rejected.
    if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
    if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
    return false;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
    // Registers in a list must be consecutive: the next one is Reg + RegWidth.
    if (Reg1 != Reg + RegWidth) { return false; }
    RegWidth++;
    return true;
  default:
    assert(false); return false;
  }
}
791
// Parse one register reference and normalize it into (RegKind, Reg, RegNum,
// RegWidth).  Accepted forms:
//   - special names:    vcc, exec, flat_scratch, ...
//   - single registers: v0, s15, ttmp3
//   - register ranges:  v[8:11], s[0:1], ttmp[4:7]
//   - register lists:   [s0, s1, s2, s3] (consecutive width-1 registers)
// On success returns true with Reg holding the final register enum value.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix, then parse either a trailing number or a range.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY].
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        if (getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

        if (getLexer().isNot(AsmToken::RBrac)) { return false; }
        Parser.Lex();

        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        // Every element must be a single register of the same kind that
        // immediately follows the registers accumulated so far.
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Translate (kind, first index, width) into a register-class entry.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0) { return false; }
    // Divide by the alignment to get the index within the sized class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  // Reject registers that do not exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}
887
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000888std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000889 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000890 SMLoc StartLoc = Tok.getLoc();
891 SMLoc EndLoc = Tok.getEndLoc();
Matt Arsenault3b159672015-12-01 20:31:08 +0000892 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
893
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000894 RegisterKind RegKind;
895 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000896
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000897 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
898 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000899 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000900 return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000901 TRI, &getSTI(), false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000902}
903
// Parse an immediate: an optional leading '-', then either a 32-bit integer
// or a floating-point literal (stored as the bit pattern of a float).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseImm(OperandVector &Operands) {
  bool Minus = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Minus = true;
    Parser.Lex();
  }

  SMLoc S = Parser.getTok().getLoc();
  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    // Accept anything representable in 32 bits, signed or unsigned.
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Minus)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    // The parser hands back the double's bit pattern; narrow to float and
    // carry the float's bit pattern as the immediate value.
    APFloat F((float)BitsToDouble(IntVal));
    if (Minus)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
                                 AMDGPUOperand::ImmTyNone, true));
    return MatchOperand_Success;
  }
  default:
    // A consumed '-' followed by a non-literal is an error; otherwise this
    // operand is simply not an immediate.
    return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
  }
}
947
948AMDGPUAsmParser::OperandMatchResultTy
949AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
950 auto res = parseImm(Operands);
951 if (res != MatchOperand_NoMatch) {
952 return res;
953 }
954
955 if (auto R = parseRegister()) {
956 assert(R->isReg());
957 R->Reg.IsForcedVOP3 = isForcedVOP3();
958 Operands.push_back(std::move(R));
959 return MatchOperand_Success;
960 }
961 return MatchOperand_ParseFail;
962}
963
// Parse an operand that may carry VOP input modifiers, in either spelling:
//   -x (neg), |x| (abs), abs(x).
// Modifier bits on the parsed operand: bit 0 = neg, bit 1 = abs.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(...)" spelling of the absolute-value modifier.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // "|...|" spelling; may not be combined with abs().
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  unsigned Modifiers = 0;
  if (Negate) {
    Modifiers |= 0x1; // neg
  }
  if (Abs) {
    // Require the closing '|'.
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2; // abs
  }
  if (Abs2) {
    // Require the closing ')'.
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2; // abs
  }

  // Attach the modifier bits to the operand parseRegOrImm just pushed.
  if (Modifiers) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Modifiers);
  }
  return MatchOperand_Success;
}
1027
1028
Tom Stellard45bb48e2015-06-13 03:28:10 +00001029unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
1030
1031 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1032
1033 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
1034 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
1035 return Match_InvalidOperand;
1036
Tom Stellard88e0b252015-10-06 15:57:53 +00001037 if ((TSFlags & SIInstrFlags::VOP3) &&
1038 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
1039 getForcedEncodingSize() != 64)
1040 return Match_PreferE32;
1041
Tom Stellard45bb48e2015-06-13 03:28:10 +00001042 return Match_Success;
1043}
1044
// Match the parsed operand list against the instruction tables and emit the
// instruction, producing a positioned diagnostic on failure.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, getSTI());
      return false;
    case Match_MissingFeature:
      return Error(IDLoc, "instruction not supported on this GPU");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      // When the matcher identified the offending operand, point the
      // diagnostic at it instead of the mnemonic.
      SMLoc ErrorLoc = IDLoc;
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size()) {
          return Error(IDLoc, "too few operands for instruction");
        }
        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
    case Match_PreferE32:
      return Error(IDLoc, "internal error: instruction without _e64 suffix "
                          "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1082
Tom Stellard347ac792015-06-26 21:15:07 +00001083bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1084 uint32_t &Minor) {
1085 if (getLexer().isNot(AsmToken::Integer))
1086 return TokError("invalid major version");
1087
1088 Major = getLexer().getTok().getIntVal();
1089 Lex();
1090
1091 if (getLexer().isNot(AsmToken::Comma))
1092 return TokError("minor version number required, comma expected");
1093 Lex();
1094
1095 if (getLexer().isNot(AsmToken::Integer))
1096 return TokError("invalid minor version");
1097
1098 Minor = getLexer().getTok().getIntVal();
1099 Lex();
1100
1101 return false;
1102}
1103
1104bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1105
1106 uint32_t Major;
1107 uint32_t Minor;
1108
1109 if (ParseDirectiveMajorMinor(Major, Minor))
1110 return true;
1111
1112 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1113 return false;
1114}
1115
// Handle ".hsa_code_object_isa [major, minor, stepping, vendor, arch]".
// With no arguments the ISA version of the targeted GPU is emitted.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  // stepping is a third integer component.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  // vendor and arch are quoted strings.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1172
Tom Stellardff7416b2015-06-26 21:58:31 +00001173bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1174 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001175 SmallString<40> ErrStr;
1176 raw_svector_ostream Err(ErrStr);
1177 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
1178 return TokError(Err.str());
1179 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001180 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001181 return false;
1182}
1183
// Parse the body of ".amd_kernel_code_t": one "key = value" entry per line
// until ".end_amd_kernel_code_t", then emit the completed header.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  // Start from the target's defaults so unspecified fields stay valid.
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1216
Tom Stellarde135ffd2015-09-25 21:41:28 +00001217bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1218 getParser().getStreamer().SwitchSection(
1219 AMDGPU::getHSATextSection(getContext()));
1220 return false;
1221}
1222
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001223bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1224 if (getLexer().isNot(AsmToken::Identifier))
1225 return TokError("expected symbol name");
1226
1227 StringRef KernelName = Parser.getTok().getString();
1228
1229 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1230 ELF::STT_AMDGPU_HSA_KERNEL);
1231 Lex();
1232 return false;
1233}
1234
Tom Stellard00f2f912015-12-02 19:47:57 +00001235bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1236 if (getLexer().isNot(AsmToken::Identifier))
1237 return TokError("expected symbol name");
1238
1239 StringRef GlobalName = Parser.getTok().getIdentifier();
1240
1241 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1242 Lex();
1243 return false;
1244}
1245
1246bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1247 if (getLexer().isNot(AsmToken::Identifier))
1248 return TokError("expected symbol name");
1249
1250 StringRef GlobalName = Parser.getTok().getIdentifier();
1251
1252 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1253 Lex();
1254 return false;
1255}
1256
1257bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1258 getParser().getStreamer().SwitchSection(
1259 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1260 return false;
1261}
1262
1263bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1264 getParser().getStreamer().SwitchSection(
1265 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1266 return false;
1267}
1268
Tom Stellard9760f032015-12-03 03:34:32 +00001269bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1270 getParser().getStreamer().SwitchSection(
1271 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1272 return false;
1273}
1274
// Top-level dispatcher for AMDGPU/HSA assembler directives.  Returns false
// when the directive was recognized and handled here, true to hand it back
// to the generic parser.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  // HSA code-object metadata.
  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext")
    return ParseSectionDirectiveHSAText();

  // Symbol annotations.
  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  // Section switches.
  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}
1310
Matt Arsenault68802d32015-11-05 03:11:27 +00001311bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1312 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001313 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001314 return true;
1315
Matt Arsenault3b159672015-12-01 20:31:08 +00001316 if (isSI()) {
1317 // No flat_scr
1318 switch (RegNo) {
1319 case AMDGPU::FLAT_SCR:
1320 case AMDGPU::FLAT_SCR_LO:
1321 case AMDGPU::FLAT_SCR_HI:
1322 return false;
1323 default:
1324 return true;
1325 }
1326 }
1327
Matt Arsenault68802d32015-11-05 03:11:27 +00001328 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1329 // SI/CI have.
1330 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1331 R.isValid(); ++R) {
1332 if (*R == RegNo)
1333 return false;
1334 }
1335
1336 return true;
1337}
1338
// Parse a single instruction operand: try the tablegen-generated custom
// parsers first, then a generic register/immediate, then fall back to a
// bare identifier token.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  // Last resort: keep a bare identifier as a token operand (e.g. keywords
  // consumed by later conversion steps).
  if (getLexer().getKind() == AsmToken::Identifier) {
    const auto &Tok = Parser.getTok();
    Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1368
// Parse a full instruction: a mnemonic with an optional _e32/_e64
// encoding-size suffix, followed by a comma/space separated operand list.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));


  // Strip the suffix so operand parsers see the bare mnemonic.
  if (Name.endswith("_e64")) { Name = Name.substr(0, Name.size() - 4); }
  if (Name.endswith("_e32")) { Name = Name.substr(0, Name.size() - 4); }

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail:
        Error(getLexer().getLoc(), "failed parsing operand.");
        // Recover by discarding the rest of the statement.
        while (!getLexer().is(AsmToken::EndOfStatement)) {
          Parser.Lex();
        }
        return true;
      case MatchOperand_NoMatch:
        Error(getLexer().getLoc(), "not a valid operand.");
        // Recover by discarding the rest of the statement.
        while (!getLexer().is(AsmToken::EndOfStatement)) {
          Parser.Lex();
        }
        return true;
    }
  }

  return false;
}
1414
1415//===----------------------------------------------------------------------===//
1416// Utility functions
1417//===----------------------------------------------------------------------===//
1418
1419AMDGPUAsmParser::OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001420AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001421 switch(getLexer().getKind()) {
1422 default: return MatchOperand_NoMatch;
1423 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001424 StringRef Name = Parser.getTok().getString();
1425 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001426 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001427 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001428
1429 Parser.Lex();
1430 if (getLexer().isNot(AsmToken::Colon))
1431 return MatchOperand_ParseFail;
1432
1433 Parser.Lex();
1434 if (getLexer().isNot(AsmToken::Integer))
1435 return MatchOperand_ParseFail;
1436
1437 if (getParser().parseAbsoluteExpression(Int))
1438 return MatchOperand_ParseFail;
1439 break;
1440 }
1441 }
1442 return MatchOperand_Success;
1443}
1444
1445AMDGPUAsmParser::OperandMatchResultTy
1446AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001447 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001448 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001449
1450 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001451 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001452
Sam Kolton11de3702016-05-24 12:38:33 +00001453 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001454 if (Res != MatchOperand_Success)
1455 return Res;
1456
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001457 if (ConvertResult && !ConvertResult(Value)) {
1458 return MatchOperand_ParseFail;
1459 }
1460
1461 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001462 return MatchOperand_Success;
1463}
1464
// Parse an optional single-bit named flag: "<Name>" yields 1, "no<Name>"
// yields 0, and end-of-statement defaults to 0.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        // Some other identifier: not this flag.
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
1496
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001497typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1498
Sam Koltona74cd522016-03-18 15:35:51 +00001499void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1500 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001501 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001502 auto i = OptionalIdx.find(ImmT);
1503 if (i != OptionalIdx.end()) {
1504 unsigned Idx = i->second;
1505 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1506 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001507 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001508 }
1509}
1510
Sam Kolton3025e7f2016-04-26 13:33:56 +00001511AMDGPUAsmParser::OperandMatchResultTy
1512AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1513 if (getLexer().isNot(AsmToken::Identifier)) {
1514 return MatchOperand_NoMatch;
1515 }
1516 StringRef Tok = Parser.getTok().getString();
1517 if (Tok != Prefix) {
1518 return MatchOperand_NoMatch;
1519 }
1520
1521 Parser.Lex();
1522 if (getLexer().isNot(AsmToken::Colon)) {
1523 return MatchOperand_ParseFail;
1524 }
1525
1526 Parser.Lex();
1527 if (getLexer().isNot(AsmToken::Identifier)) {
1528 return MatchOperand_ParseFail;
1529 }
1530
1531 Value = Parser.getTok().getString();
1532 return MatchOperand_Success;
1533}
1534
Tom Stellard45bb48e2015-06-13 03:28:10 +00001535//===----------------------------------------------------------------------===//
1536// ds
1537//===----------------------------------------------------------------------===//
1538
// Convert parsed DS operands (with split offset0/offset1 fields) into an
// MCInst, filling defaults for omitted optionals and the implicit m0 use.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Optional immediates must be appended in encoding order.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
1563
1564void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1565
1566 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1567 bool GDSOnly = false;
1568
1569 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1570 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1571
1572 // Add the register arguments
1573 if (Op.isReg()) {
1574 Op.addRegOperands(Inst, 1);
1575 continue;
1576 }
1577
1578 if (Op.isToken() && Op.getToken() == "gds") {
1579 GDSOnly = true;
1580 continue;
1581 }
1582
1583 // Handle optional arguments
1584 OptionalIdx[Op.getImmTy()] = i;
1585 }
1586
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001587 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1588 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001589
1590 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001591 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001592 }
1593 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1594}
1595
1596
1597//===----------------------------------------------------------------------===//
1598// s_waitcnt
1599//===----------------------------------------------------------------------===//
1600
// Parse one "name(value)" component of an s_waitcnt operand and merge the
// count into IntVal at that counter's bit field.  Returns true on error.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Components may be joined by '&' or ','; consume the separator.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  // Bit fields: vmcnt [3:0], expcnt [6:4], lgkmcnt [11:8].
  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  // Clear the counter's field, then insert the parsed count.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
1643
1644AMDGPUAsmParser::OperandMatchResultTy
1645AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1646 // Disable all counters by default.
1647 // vmcnt [3:0]
1648 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00001649 // lgkmcnt [11:8]
1650 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001651 SMLoc S = Parser.getTok().getLoc();
1652
1653 switch(getLexer().getKind()) {
1654 default: return MatchOperand_ParseFail;
1655 case AsmToken::Integer:
1656 // The operand can be an integer value.
1657 if (getParser().parseAbsoluteExpression(CntVal))
1658 return MatchOperand_ParseFail;
1659 break;
1660
1661 case AsmToken::Identifier:
1662 do {
1663 if (parseCnt(CntVal))
1664 return MatchOperand_ParseFail;
1665 } while(getLexer().isNot(AsmToken::EndOfStatement));
1666 break;
1667 }
1668 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1669 return MatchOperand_Success;
1670}
1671
// Parse the "hwreg(REG[, OFFSET, WIDTH])" form of an s_getreg/s_setreg
// operand. Fills in HwRegCode and, when the two optional comma-separated
// fields are present, Offset and Width. IsIdentifier reports whether the
// register was given by symbolic name (used by the caller to pick the
// right diagnostic). Returns true on a parse error, false on success.
// Field range validation is left to the caller.
bool AMDGPUAsmParser::parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier) {
  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    IsIdentifier = true;
    // Symbolic register name. An unknown name yields unsigned(-1), a value
    // far outside [0, 63], which the caller rejects as out of range.
    HwRegCode = StringSwitch<unsigned>(Parser.getTok().getString())
      .Case("HW_REG_MODE" , 1)
      .Case("HW_REG_STATUS" , 2)
      .Case("HW_REG_TRAPSTS" , 3)
      .Case("HW_REG_HW_ID" , 4)
      .Case("HW_REG_GPR_ALLOC", 5)
      .Case("HW_REG_LDS_ALLOC", 6)
      .Case("HW_REG_IB_STS" , 7)
      .Default(-1);
    Parser.Lex();
  } else {
    IsIdentifier = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwRegCode))
      return true;
  }

  // Short form: hwreg(REG) — offset/width keep the caller's defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
1731
// Parse an s_getreg/s_setreg simm16 operand: either a raw 16-bit integer
// or the symbolic hwreg(...) form. Out-of-range fields produce diagnostics
// but an immediate operand is still created so parsing can continue.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_ParseFail;
      if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        bool IsIdentifier = false;
        int64_t HwRegCode = -1;
        int64_t Offset = 0; // default
        int64_t Width = 32; // default
        if (parseHwregOperand(HwRegCode, Offset, Width, IsIdentifier))
          return MatchOperand_ParseFail;
        // Packed simm16 layout:
        // HwRegCode (6) [5:0]
        // Offset (5) [10:6]
        // WidthMinusOne (5) [15:11]
        if (HwRegCode < 0 || HwRegCode > 63) {
          if (IsIdentifier)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || Offset > 31)
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if (Width < 1 || Width > 32)
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        // Note: fields are encoded even if a diagnostic fired above.
        Imm16Val = HwRegCode | (Offset << 6) | ((Width-1) << 11);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
1777
// Any immediate is acceptable as an s_waitcnt operand.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
1781
// True iff this operand was parsed as an s_getreg/s_setreg hwreg immediate.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
1785
// Default hwreg immediate when the operand is omitted: all fields zero.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
}
1789
// Parse the symbolic sendmsg(MSG[, OP[, STREAM_ID]]) operand form.
// Fills in Msg, Operation and StreamId; the Id fields are left exactly as
// parsed (possibly out of range) so the caller can issue precise
// diagnostics. Returns true on a hard parse error, false otherwise.
bool AMDGPUAsmParser::parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    // Look the name up in the symbolic-id table, skipping unused gap ids.
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): a second integer token immediately after the message id
    // is re-parsed into Msg.Id and, on failure, forces ID_UNKNOWN_ —
    // presumably a guard against malformed input; confirm intent.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Only GS, GS_DONE and SYSMSG take an operation argument.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    // The operation name table and its bounds depend on the message family.
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const std::string Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
1885
1886AMDGPUAsmParser::OperandMatchResultTy
1887AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
1888 using namespace llvm::AMDGPU::SendMsg;
1889
1890 int64_t Imm16Val = 0;
1891 SMLoc S = Parser.getTok().getLoc();
1892
1893 switch(getLexer().getKind()) {
1894 default:
1895 return MatchOperand_NoMatch;
1896 case AsmToken::Integer:
1897 // The operand can be an integer value.
1898 if (getParser().parseAbsoluteExpression(Imm16Val))
1899 return MatchOperand_NoMatch;
1900 if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
1901 Error(S, "invalid immediate: only 16-bit values are legal");
1902 // Do not return error code, but create an imm operand anyway and proceed
1903 // to the next operand, if any. That avoids unneccessary error messages.
1904 }
1905 break;
1906 case AsmToken::Identifier: {
1907 OperandInfoTy Msg(ID_UNKNOWN_);
1908 OperandInfoTy Operation(OP_UNKNOWN_);
1909 int64_t StreamId = STREAM_ID_DEFAULT;
1910 if (parseSendMsg(Msg, Operation, StreamId))
1911 return MatchOperand_NoMatch;
1912 do {
1913 // Validate and encode message ID.
1914 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
1915 || Msg.Id == ID_SYSMSG)) {
1916 if (Msg.IsSymbolic)
1917 Error(S, "invalid/unsupported symbolic name of message");
1918 else
1919 Error(S, "invalid/unsupported code of message");
1920 break;
1921 }
1922 Imm16Val = Msg.Id;
1923 // Validate and encode operation ID.
1924 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
1925 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
1926 if (Operation.IsSymbolic)
1927 Error(S, "invalid symbolic name of GS_OP");
1928 else
1929 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
1930 break;
1931 }
1932 if (Operation.Id == OP_GS_NOP
1933 && Msg.Id != ID_GS_DONE) {
1934 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
1935 break;
1936 }
1937 Imm16Val |= (Operation.Id << OP_SHIFT_);
1938 }
1939 if (Msg.Id == ID_SYSMSG) {
1940 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
1941 if (Operation.IsSymbolic)
1942 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
1943 else
1944 Error(S, "invalid/unsupported code of SYSMSG_OP");
1945 break;
1946 }
1947 Imm16Val |= (Operation.Id << OP_SHIFT_);
1948 }
1949 // Validate and encode stream ID.
1950 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1951 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
1952 Error(S, "invalid stream id: only 2-bit values are legal");
1953 break;
1954 }
1955 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
1956 }
1957 } while (0);
1958 }
1959 break;
1960 }
1961 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
1962 return MatchOperand_Success;
1963}
1964
// True iff this operand was parsed as an s_sendmsg immediate.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
1968
Tom Stellard45bb48e2015-06-13 03:28:10 +00001969//===----------------------------------------------------------------------===//
1970// sopp branch targets
1971//===----------------------------------------------------------------------===//
1972
1973AMDGPUAsmParser::OperandMatchResultTy
1974AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1975 SMLoc S = Parser.getTok().getLoc();
1976
1977 switch (getLexer().getKind()) {
1978 default: return MatchOperand_ParseFail;
1979 case AsmToken::Integer: {
1980 int64_t Imm;
1981 if (getParser().parseAbsoluteExpression(Imm))
1982 return MatchOperand_ParseFail;
1983 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1984 return MatchOperand_Success;
1985 }
1986
1987 case AsmToken::Identifier:
1988 Operands.push_back(AMDGPUOperand::CreateExpr(
1989 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
1990 Parser.getTok().getString()), getContext()), S));
1991 Parser.Lex();
1992 return MatchOperand_Success;
1993 }
1994}
1995
1996//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00001997// mubuf
1998//===----------------------------------------------------------------------===//
1999
// MUBUF offsets are unsigned 12-bit immediates.
bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}
2003
// Default (zero) offset when a MUBUF instruction omits 'offset:N'.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2007
// An absent 'glc' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
2011
// An absent 'slc' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
2015
// An absent 'tfe' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
2019
// Common MCInst conversion for MUBUF load/store/atomic instructions.
// IsAtomic: the glc bit is hard-coded by the opcode, so no glc operand is
// emitted. IsAtomicReturn: RTN atomics read and write vdata, so the parsed
// $vdata_in is duplicated as the tied destination. IsAtomicReturn implies
// IsAtomic (asserted below).
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                            const OperandVector &Operands,
                            bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  // Optional modifiers in canonical operand order, defaults when absent.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2065
2066//===----------------------------------------------------------------------===//
2067// mimg
2068//===----------------------------------------------------------------------===//
2069
// Convert parsed MIMG instruction operands to MCInst operands: result
// registers first, then registers/immediates in order, then the optional
// image modifiers in canonical operand order with defaults when absent.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Definitions come first in the MCInst operand list.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Remember where each optional modifier was parsed.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2102
// Same as cvtMIMG, but for MIMG atomics the data register is tied: the
// first source is re-added as a copy of the destination.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2138
// An absent 'dmask' modifier defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}
2142
// An absent 'unorm' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}
2146
// An absent 'da' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
}
2150
// An absent 'r128' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
}
2154
// An absent 'lwe' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
2158
Tom Stellard45bb48e2015-06-13 03:28:10 +00002159//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002160// smrd
2161//===----------------------------------------------------------------------===//
2162
// SMRD 8-bit dword offset.
bool AMDGPUOperand::isSMRDOffset() const {

  // FIXME: Support 20-bit offsets on VI. We need to to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}
2169
// SMRD offset that must be emitted as a 32-bit literal (CI).
bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
2175
// An absent SMRD offset defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2179
// An absent SMRD literal offset defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2183
Tom Stellard217361c2015-08-06 19:28:38 +00002184//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002185// vop3
2186//===----------------------------------------------------------------------===//
2187
// Map an omod multiplier literal (1, 2 or 4) onto its encoding (0, 1, 2).
// Returns false for any other multiplier, leaving the value untouched.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
2195
// Map an omod divisor literal onto its encoding: div:1 -> 0, div:2 -> 3.
// Any other divisor is rejected, leaving the value untouched.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2209
// Normalize the dpp bound_ctrl value: the source forms 0 and -1 map to
// the encodings 1 and 0 respectively; anything else is rejected.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2220
// Note: the order in this table matches the order of operands in AsmString.
// Each row: the operand's assembly name, the immediate type identifying it,
// whether it is a bare bit (no ":value" suffix), and an optional converter
// applied to the parsed value.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
  {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"sdwa_sel", AMDGPUOperand::ImmTySdwaSel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002246
// Try each known optional operand in table order; the first one that either
// matches or hard-fails decides the result. Returns NoMatch if none applied.
AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaSel) {
      res = parseSDWASel(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else {
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
2268
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002269AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2270{
2271 StringRef Name = Parser.getTok().getString();
2272 if (Name == "mul") {
Sam Kolton11de3702016-05-24 12:38:33 +00002273 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002274 } else if (Name == "div") {
Sam Kolton11de3702016-05-24 12:38:33 +00002275 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002276 } else {
2277 return MatchOperand_NoMatch;
2278 }
2279}
2280
// An absent 'clamp' bit defaults to 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
}
2284
2285AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
2286 return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
2287}
2288
// Straight 1:1 conversion: definitions first, then every remaining parsed
// operand as a register or immediate, with no optional-operand handling.
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}
2298
// For opcodes that exist in both a plain and a VOP3 (modifier) encoding:
// use full VOP3 conversion only when the matched opcode is a VOP3 one.
void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}
2307
// Convert VOP3 operands: definitions first, then each source with its
// input modifiers (two MCInst operands per source), then the optional
// clamp and omod immediates with defaults when absent.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      // Source operand plus its modifier bits.
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
}
2330
Sam Koltondfa29f72016-03-09 12:29:31 +00002331//===----------------------------------------------------------------------===//
2332// dpp
2333//===----------------------------------------------------------------------===//
2334
// Check that this immediate is a legal 9-bit dpp_ctrl encoding
// (see parseDPPCtrl for how the encodings are built).
bool AMDGPUOperand::isDPPCtrl() const {
  bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
  if (result) {
    int64_t Imm = getImm();
    return ((Imm >= 0x000) && (Imm <= 0x0ff)) || // quad_perm
      ((Imm >= 0x101) && (Imm <= 0x10f)) ||      // row_shl:1..15
      ((Imm >= 0x111) && (Imm <= 0x11f)) ||      // row_shr:1..15
      ((Imm >= 0x121) && (Imm <= 0x12f)) ||      // row_ror:1..15
      (Imm == 0x130) ||                          // wave_shl
      (Imm == 0x134) ||                          // wave_rol
      (Imm == 0x138) ||                          // wave_shr
      (Imm == 0x13c) ||                          // wave_ror
      (Imm == 0x140) ||                          // row_mirror
      (Imm == 0x141) ||                          // row_half_mirror
      (Imm == 0x142) ||                          // row_bcast:15
      (Imm == 0x143);                            // row_bcast:31
  }
  return false;
}
2354
// Parse a dpp_ctrl operand and fold it to its 9-bit encoding:
//   quad_perm:[a,b,c,d]                  -> 0x000..0x0ff (2 bits per lane)
//   row_shl/row_shr/row_ror:N            -> 0x100/0x110/0x120 | N
//   wave_shl/wave_rol/wave_shr/wave_ror  -> 0x130/0x134/0x138/0x13c
//   row_mirror / row_half_mirror         -> 0x140 / 0x141
//   row_bcast:15 / row_bcast:31          -> 0x142 / 0x143
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      // Each of the four selects occupies two bits of the encoding.
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 2);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 4);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 6);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;

    } else {
      // sel:%d
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      // Fold the prefix's base encoding into the selector value.
      if (Prefix == "row_shl") {
        Int |= 0x100;
      } else if (Prefix == "row_shr") {
        Int |= 0x110;
      } else if (Prefix == "row_ror") {
        Int |= 0x120;
      } else if (Prefix == "wave_shl") {
        Int = 0x130;
      } else if (Prefix == "wave_rol") {
        Int = 0x134;
      } else if (Prefix == "wave_shr") {
        Int = 0x138;
      } else if (Prefix == "wave_ror") {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        // Only broadcast widths 15 and 31 have encodings.
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }
  Parser.Lex(); // eat last token

  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2466
Sam Kolton5f10a132016-05-06 11:31:17 +00002467AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2468 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002469}
2470
Sam Kolton5f10a132016-05-06 11:31:17 +00002471AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2472 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002473}
2474
Sam Kolton5f10a132016-05-06 11:31:17 +00002475AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2476 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2477}
2478
2479void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002480 OptionalImmIndexMap OptionalIdx;
2481
2482 unsigned I = 1;
2483 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2484 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2485 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2486 }
2487
2488 for (unsigned E = Operands.size(); I != E; ++I) {
2489 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2490 // Add the register arguments
Sam Kolton5f10a132016-05-06 11:31:17 +00002491 if (Op.isRegOrImmWithInputMods()) {
2492 // We convert only instructions with modifiers
Sam Koltondfa29f72016-03-09 12:29:31 +00002493 Op.addRegOrImmWithInputModsOperands(Inst, 2);
2494 } else if (Op.isDPPCtrl()) {
2495 Op.addImmOperands(Inst, 1);
2496 } else if (Op.isImm()) {
2497 // Handle optional arguments
2498 OptionalIdx[Op.getImmTy()] = I;
2499 } else {
2500 llvm_unreachable("Invalid operand type");
2501 }
2502 }
2503
2504 // ToDo: fix default values for row_mask and bank_mask
2505 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2506 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2507 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2508}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002509
Sam Kolton3025e7f2016-04-26 13:33:56 +00002510//===----------------------------------------------------------------------===//
2511// sdwa
2512//===----------------------------------------------------------------------===//
2513
2514AMDGPUAsmParser::OperandMatchResultTy
2515AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2516 SMLoc S = Parser.getTok().getLoc();
2517 StringRef Value;
2518 AMDGPUAsmParser::OperandMatchResultTy res;
2519
2520 res = parseStringWithPrefix("dst_sel", Value);
2521 if (res == MatchOperand_ParseFail) {
2522 return MatchOperand_ParseFail;
2523 } else if (res == MatchOperand_NoMatch) {
2524 res = parseStringWithPrefix("src0_sel", Value);
2525 if (res == MatchOperand_ParseFail) {
2526 return MatchOperand_ParseFail;
2527 } else if (res == MatchOperand_NoMatch) {
2528 res = parseStringWithPrefix("src1_sel", Value);
2529 if (res != MatchOperand_Success) {
2530 return res;
2531 }
2532 }
2533 }
2534
2535 int64_t Int;
2536 Int = StringSwitch<int64_t>(Value)
2537 .Case("BYTE_0", 0)
2538 .Case("BYTE_1", 1)
2539 .Case("BYTE_2", 2)
2540 .Case("BYTE_3", 3)
2541 .Case("WORD_0", 4)
2542 .Case("WORD_1", 5)
2543 .Case("DWORD", 6)
2544 .Default(0xffffffff);
2545 Parser.Lex(); // eat last token
2546
2547 if (Int == 0xffffffff) {
2548 return MatchOperand_ParseFail;
2549 }
2550
2551 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2552 AMDGPUOperand::ImmTySdwaSel));
2553 return MatchOperand_Success;
2554}
2555
2556AMDGPUAsmParser::OperandMatchResultTy
2557AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2558 SMLoc S = Parser.getTok().getLoc();
2559 StringRef Value;
2560 AMDGPUAsmParser::OperandMatchResultTy res;
2561
2562 res = parseStringWithPrefix("dst_unused", Value);
2563 if (res != MatchOperand_Success) {
2564 return res;
2565 }
2566
2567 int64_t Int;
2568 Int = StringSwitch<int64_t>(Value)
2569 .Case("UNUSED_PAD", 0)
2570 .Case("UNUSED_SEXT", 1)
2571 .Case("UNUSED_PRESERVE", 2)
2572 .Default(0xffffffff);
2573 Parser.Lex(); // eat last token
2574
2575 if (Int == 0xffffffff) {
2576 return MatchOperand_ParseFail;
2577 }
2578
2579 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2580 AMDGPUOperand::ImmTySdwaDstUnused));
2581 return MatchOperand_Success;
2582}
2583
Sam Kolton5f10a132016-05-06 11:31:17 +00002584AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
2585 return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
2586}
2587
2588AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
2589 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
2590}
2591
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002592
Tom Stellard45bb48e2015-06-13 03:28:10 +00002593/// Force static initialization.
2594extern "C" void LLVMInitializeAMDGPUAsmParser() {
2595 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
2596 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
2597}
2598
2599#define GET_REGISTER_MATCHER
2600#define GET_MATCHER_IMPLEMENTATION
2601#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00002602
2603
// This function must be defined after the auto-generated include above so
// that the MatchClassKind enum is in scope.
2606unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
2607 unsigned Kind) {
2608 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
2609 // But MatchInstructionImpl() expects to meet token and fails to validate
2610 // operand. This method checks if we are given immediate operand but expect to
2611 // get corresponding token.
2612 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
2613 switch (Kind) {
2614 case MCK_addr64:
2615 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
2616 case MCK_gds:
2617 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
2618 case MCK_glc:
2619 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
2620 case MCK_idxen:
2621 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
2622 case MCK_offen:
2623 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
2624 default: return Match_InvalidOperand;
2625 }
2626}