blob: 65a21e08cf96eeaf746fd947f4a16236252d8edf [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000016#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000018#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/StringSwitch.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCParser/MCAsmLexer.h"
27#include "llvm/MC/MCParser/MCAsmParser.h"
28#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000029#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000033#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000034#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/Support/SourceMgr.h"
37#include "llvm/Support/TargetRegistry.h"
38#include "llvm/Support/raw_ostream.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039
// FIXME ODR: Move this to some common place for AsmParser and InstPrinter
namespace llvm {
namespace AMDGPU {
namespace SendMsg {

// Symbolic names for s_sendmsg message IDs, indexed by ID value; slots
// without a named message are nullptr.
// This must be in sync with llvm::AMDGPU::SendMsg::Id enum members.
static
const char* const IdSymbolic[] = {
  nullptr,          // 0
  "MSG_INTERRUPT",  // 1
  "MSG_GS",         // 2
  "MSG_GS_DONE",    // 3
  nullptr,          // 4..14 have no symbolic form
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  nullptr,
  "MSG_SYSMSG"      // 15
};

// Symbolic names for the SYSMSG operation field (index == operation value;
// index 0 has no symbolic form).
// These two must be in sync with llvm::AMDGPU::SendMsg::Op enum members.
static
const char* const OpSysSymbolic[] = {
  nullptr,
  "SYSMSG_OP_ECC_ERR_INTERRUPT",
  "SYSMSG_OP_REG_RD",
  "SYSMSG_OP_HOST_TRAP_ACK",
  "SYSMSG_OP_TTRACE_PC"
};

// Symbolic names for the GS operation field (index == operation value).
static
const char* const OpGsSymbolic[] = {
  "GS_OP_NOP",
  "GS_OP_CUT",
  "GS_OP_EMIT",
  "GS_OP_EMIT_CUT"
};

} // namespace SendMsg
} // namespace AMDGPU
} // namespace llvm
87
Tom Stellard45bb48e2015-06-13 03:28:10 +000088using namespace llvm;
89
// In some cases (e.g. buffer atomic instructions) MatchOperandParserImpl()
// may invoke tryCustomParseOperand() multiple times with the same MCK value.
// That leads to adding the same "default" operand multiple times in a row,
// which is wrong. The workaround adds only the first default operand; for
// the rest, "dummy" operands are added instead. The reason for the dummies
// is that if we simply skipped adding an operand, the parser would get
// stuck in an endless loop. Dummies must be removed prior to matching &
// emitting MCInsts.
//
// Comment out this macro to disable the workaround.
99#define WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
100
Tom Stellard45bb48e2015-06-13 03:28:10 +0000101namespace {
102
103struct OptionalOperand;
104
// Classification of a parsed register operand: vector (VGPR) or scalar (SGPR)
// register file, TTMP registers, or an individually named register
// (IS_SPECIAL). IS_UNKNOWN is the state before classification.
enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
106
Tom Stellard45bb48e2015-06-13 03:28:10 +0000107class AMDGPUOperand : public MCParsedAsmOperand {
108 enum KindTy {
109 Token,
110 Immediate,
111 Register,
112 Expression
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000113#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
114 ,Dummy
115#endif
Tom Stellard45bb48e2015-06-13 03:28:10 +0000116 } Kind;
117
118 SMLoc StartLoc, EndLoc;
119
120public:
121 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
122
123 MCContext *Ctx;
124
Sam Kolton5f10a132016-05-06 11:31:17 +0000125 typedef std::unique_ptr<AMDGPUOperand> Ptr;
126
Tom Stellard45bb48e2015-06-13 03:28:10 +0000127 enum ImmTy {
128 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000129 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000130 ImmTyOffen,
131 ImmTyIdxen,
132 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000133 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000134 ImmTyOffset0,
135 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000136 ImmTyGLC,
137 ImmTySLC,
138 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000139 ImmTyClampSI,
140 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000141 ImmTyDppCtrl,
142 ImmTyDppRowMask,
143 ImmTyDppBankMask,
144 ImmTyDppBoundCtrl,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000145 ImmTySdwaSel,
146 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000147 ImmTyDMask,
148 ImmTyUNorm,
149 ImmTyDA,
150 ImmTyR128,
151 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +0000152 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000153 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000154 };
155
156 struct TokOp {
157 const char *Data;
158 unsigned Length;
159 };
160
161 struct ImmOp {
162 bool IsFPImm;
163 ImmTy Type;
164 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000165 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000166 };
167
168 struct RegOp {
169 unsigned RegNo;
170 int Modifiers;
171 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000172 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000173 bool IsForcedVOP3;
174 };
175
176 union {
177 TokOp Tok;
178 ImmOp Imm;
179 RegOp Reg;
180 const MCExpr *Expr;
181 };
182
Sam Kolton1bdcef72016-05-23 09:59:02 +0000183 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const {
184 if (Imm.Type == ImmTyNone && ApplyModifiers && Imm.Modifiers != 0) {
185 // Apply modifiers to immediate value
186 int64_t Val = Imm.Val;
187 bool Negate = Imm.Modifiers & 0x1;
188 bool Abs = Imm.Modifiers & 0x2;
189 if (Imm.IsFPImm) {
190 APFloat F(BitsToFloat(Val));
191 if (Abs) {
192 F.clearSign();
193 }
194 if (Negate) {
195 F.changeSign();
196 }
197 Val = F.bitcastToAPInt().getZExtValue();
198 } else {
199 Val = Abs ? std::abs(Val) : Val;
200 Val = Negate ? -Val : Val;
201 }
202 Inst.addOperand(MCOperand::createImm(Val));
203 } else {
204 Inst.addOperand(MCOperand::createImm(getImm()));
205 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000206 }
207
208 StringRef getToken() const {
209 return StringRef(Tok.Data, Tok.Length);
210 }
211
212 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000213 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000214 }
215
216 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000217 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000218 addRegOperands(Inst, N);
219 else
220 addImmOperands(Inst, N);
221 }
222
Tom Stellardd93a34f2016-02-22 19:17:56 +0000223 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
224 if (isRegKind()) {
225 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
226 addRegOperands(Inst, N);
227 } else {
228 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
Sam Kolton1bdcef72016-05-23 09:59:02 +0000229 addImmOperands(Inst, N, false);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000230 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000231 }
232
233 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
234 if (isImm())
235 addImmOperands(Inst, N);
236 else {
237 assert(isExpr());
238 Inst.addOperand(MCOperand::createExpr(Expr));
239 }
240 }
241
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000242#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
243 bool isDummy() const {
244 return Kind == Dummy;
245 }
246#endif
247
Tom Stellard45bb48e2015-06-13 03:28:10 +0000248 bool isToken() const override {
249 return Kind == Token;
250 }
251
252 bool isImm() const override {
253 return Kind == Immediate;
254 }
255
Tom Stellardd93a34f2016-02-22 19:17:56 +0000256 bool isInlinableImm() const {
257 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
258 immediates are inlinable (e.g. "clamp" attribute is not) */ )
259 return false;
260 // TODO: We should avoid using host float here. It would be better to
Sam Koltona74cd522016-03-18 15:35:51 +0000261 // check the float bit values which is what a few other places do.
Tom Stellardd93a34f2016-02-22 19:17:56 +0000262 // We've had bot failures before due to weird NaN support on mips hosts.
263 const float F = BitsToFloat(Imm.Val);
264 // TODO: Add 1/(2*pi) for VI
265 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000266 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000267 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000268 }
269
Tom Stellard45bb48e2015-06-13 03:28:10 +0000270 int64_t getImm() const {
271 return Imm.Val;
272 }
273
274 enum ImmTy getImmTy() const {
275 assert(isImm());
276 return Imm.Type;
277 }
278
279 bool isRegKind() const {
280 return Kind == Register;
281 }
282
283 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000284 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000285 }
286
Tom Stellardd93a34f2016-02-22 19:17:56 +0000287 bool isRegOrImmWithInputMods() const {
288 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000289 }
290
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000291 bool isImmTy(ImmTy ImmT) const {
292 return isImm() && Imm.Type == ImmT;
293 }
294
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000295 bool isClampSI() const {
296 return isImmTy(ImmTyClampSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000297 }
298
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000299 bool isOModSI() const {
300 return isImmTy(ImmTyOModSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000301 }
302
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000303 bool isImmModifier() const {
304 return Kind == Immediate && Imm.Type != ImmTyNone;
305 }
306
307 bool isDMask() const {
308 return isImmTy(ImmTyDMask);
309 }
310
311 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
312 bool isDA() const { return isImmTy(ImmTyDA); }
313 bool isR128() const { return isImmTy(ImmTyUNorm); }
314 bool isLWE() const { return isImmTy(ImmTyLWE); }
315
Tom Stellarda90b9522016-02-11 03:28:15 +0000316 bool isMod() const {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000317 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000318 }
319
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000320 bool isOffen() const { return isImmTy(ImmTyOffen); }
321 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
322 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
323 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
324 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
325 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000326 bool isGDS() const { return isImmTy(ImmTyGDS); }
327 bool isGLC() const { return isImmTy(ImmTyGLC); }
328 bool isSLC() const { return isImmTy(ImmTySLC); }
329 bool isTFE() const { return isImmTy(ImmTyTFE); }
330
Sam Koltondfa29f72016-03-09 12:29:31 +0000331 bool isBankMask() const {
332 return isImmTy(ImmTyDppBankMask);
333 }
334
335 bool isRowMask() const {
336 return isImmTy(ImmTyDppRowMask);
337 }
338
339 bool isBoundCtrl() const {
340 return isImmTy(ImmTyDppBoundCtrl);
341 }
Sam Koltona74cd522016-03-18 15:35:51 +0000342
Sam Kolton3025e7f2016-04-26 13:33:56 +0000343 bool isSDWASel() const {
344 return isImmTy(ImmTySdwaSel);
345 }
346
347 bool isSDWADstUnused() const {
348 return isImmTy(ImmTySdwaDstUnused);
349 }
350
Tom Stellard45bb48e2015-06-13 03:28:10 +0000351 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000352 assert(isReg() || (isImm() && Imm.Modifiers == 0));
353 if (isReg())
354 Reg.Modifiers = Mods;
355 else
356 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000357 }
358
359 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000360 assert(isRegKind() || isImm());
361 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000362 }
363
364 unsigned getReg() const override {
365 return Reg.RegNo;
366 }
367
368 bool isRegOrImm() const {
369 return isReg() || isImm();
370 }
371
372 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000373 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000374 }
375
376 bool isSCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000377 return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000378 }
379
Matt Arsenault86d336e2015-09-08 21:15:00 +0000380 bool isSCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000381 return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000382 }
383
384 bool isSSrc32() const {
385 return isImm() || isSCSrc32();
386 }
387
388 bool isSSrc64() const {
389 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
390 // See isVSrc64().
391 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000392 }
393
Tom Stellard45bb48e2015-06-13 03:28:10 +0000394 bool isVCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000395 return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000396 }
397
398 bool isVCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000399 return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000400 }
401
402 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000403 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000404 }
405
406 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000407 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000408 // narrowed to 32 bits (in the instruction stream). That require knowledge
409 // of instruction type (unsigned/signed, floating or "untyped"/B64),
410 // see [AMD GCN3 ISA 6.3.1].
411 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
412 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000413 }
414
415 bool isMem() const override {
416 return false;
417 }
418
419 bool isExpr() const {
420 return Kind == Expression;
421 }
422
423 bool isSoppBrTarget() const {
424 return isExpr() || isImm();
425 }
426
427 SMLoc getStartLoc() const override {
428 return StartLoc;
429 }
430
431 SMLoc getEndLoc() const override {
432 return EndLoc;
433 }
434
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000435 void printImmTy(raw_ostream& OS, ImmTy Type) const {
436 switch (Type) {
437 case ImmTyNone: OS << "None"; break;
438 case ImmTyGDS: OS << "GDS"; break;
439 case ImmTyOffen: OS << "Offen"; break;
440 case ImmTyIdxen: OS << "Idxen"; break;
441 case ImmTyAddr64: OS << "Addr64"; break;
442 case ImmTyOffset: OS << "Offset"; break;
443 case ImmTyOffset0: OS << "Offset0"; break;
444 case ImmTyOffset1: OS << "Offset1"; break;
445 case ImmTyGLC: OS << "GLC"; break;
446 case ImmTySLC: OS << "SLC"; break;
447 case ImmTyTFE: OS << "TFE"; break;
448 case ImmTyClampSI: OS << "ClampSI"; break;
449 case ImmTyOModSI: OS << "OModSI"; break;
450 case ImmTyDppCtrl: OS << "DppCtrl"; break;
451 case ImmTyDppRowMask: OS << "DppRowMask"; break;
452 case ImmTyDppBankMask: OS << "DppBankMask"; break;
453 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
454 case ImmTySdwaSel: OS << "SdwaSel"; break;
455 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
456 case ImmTyDMask: OS << "DMask"; break;
457 case ImmTyUNorm: OS << "UNorm"; break;
458 case ImmTyDA: OS << "DA"; break;
459 case ImmTyR128: OS << "R128"; break;
460 case ImmTyLWE: OS << "LWE"; break;
461 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000462 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000463 }
464 }
465
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000466 void print(raw_ostream &OS) const override {
467 switch (Kind) {
468 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000469 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000470 break;
471 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000472 OS << '<' << getImm();
473 if (getImmTy() != ImmTyNone) {
474 OS << " type: "; printImmTy(OS, getImmTy());
475 }
476 OS << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000477 break;
478 case Token:
479 OS << '\'' << getToken() << '\'';
480 break;
481 case Expression:
482 OS << "<expr " << *Expr << '>';
483 break;
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000484#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
485 case Dummy:
486 OS << "<dummy>";
487 break;
488#endif
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000489 }
490 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000491
Sam Kolton5f10a132016-05-06 11:31:17 +0000492 static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
493 enum ImmTy Type = ImmTyNone,
494 bool IsFPImm = false) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000495 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
496 Op->Imm.Val = Val;
497 Op->Imm.IsFPImm = IsFPImm;
498 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000499 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000500 Op->StartLoc = Loc;
501 Op->EndLoc = Loc;
502 return Op;
503 }
504
Sam Kolton5f10a132016-05-06 11:31:17 +0000505 static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
506 bool HasExplicitEncodingSize = true) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000507 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
508 Res->Tok.Data = Str.data();
509 Res->Tok.Length = Str.size();
510 Res->StartLoc = Loc;
511 Res->EndLoc = Loc;
512 return Res;
513 }
514
Sam Kolton5f10a132016-05-06 11:31:17 +0000515 static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
516 SMLoc E,
517 const MCRegisterInfo *TRI,
518 const MCSubtargetInfo *STI,
519 bool ForceVOP3) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000520 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
521 Op->Reg.RegNo = RegNo;
522 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000523 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000524 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000525 Op->Reg.IsForcedVOP3 = ForceVOP3;
526 Op->StartLoc = S;
527 Op->EndLoc = E;
528 return Op;
529 }
530
Sam Kolton5f10a132016-05-06 11:31:17 +0000531 static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000532 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
533 Op->Expr = Expr;
534 Op->StartLoc = S;
535 Op->EndLoc = S;
536 return Op;
537 }
538
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000539#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
540 static AMDGPUOperand::Ptr CreateDummy(SMLoc S) {
541 auto Op = llvm::make_unique<AMDGPUOperand>(Dummy);
542 Op->StartLoc = S;
543 Op->EndLoc = S;
544 return Op;
545 }
546#endif
547
Tom Stellard45bb48e2015-06-13 03:28:10 +0000548 bool isSWaitCnt() const;
Artem Tamazovd6468662016-04-25 14:13:51 +0000549 bool isHwreg() const;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000550 bool isSendMsg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000551 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000552 bool isSMRDOffset() const;
553 bool isSMRDLiteralOffset() const;
Sam Koltondfa29f72016-03-09 12:29:31 +0000554 bool isDPPCtrl() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000555};
556
557class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000558 const MCInstrInfo &MII;
559 MCAsmParser &Parser;
560
561 unsigned ForcedEncodingSize;
Matt Arsenault68802d32015-11-05 03:11:27 +0000562
Matt Arsenault3b159672015-12-01 20:31:08 +0000563 bool isSI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000564 return AMDGPU::isSI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000565 }
566
567 bool isCI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000568 return AMDGPU::isCI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000569 }
570
Matt Arsenault68802d32015-11-05 03:11:27 +0000571 bool isVI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000572 return AMDGPU::isVI(getSTI());
Matt Arsenault68802d32015-11-05 03:11:27 +0000573 }
574
575 bool hasSGPR102_SGPR103() const {
576 return !isVI();
577 }
578
Tom Stellard45bb48e2015-06-13 03:28:10 +0000579 /// @name Auto-generated Match Functions
580 /// {
581
582#define GET_ASSEMBLER_HEADER
583#include "AMDGPUGenAsmMatcher.inc"
584
585 /// }
586
Tom Stellard347ac792015-06-26 21:15:07 +0000587private:
588 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
589 bool ParseDirectiveHSACodeObjectVersion();
590 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000591 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
592 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000593 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000594 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000595 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000596 bool ParseDirectiveAMDGPUHsaModuleGlobal();
597 bool ParseDirectiveAMDGPUHsaProgramGlobal();
598 bool ParseSectionDirectiveHSADataGlobalAgent();
599 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000600 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000601 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
602 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000603 void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000604
Tom Stellard45bb48e2015-06-13 03:28:10 +0000605public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000606 enum AMDGPUMatchResultTy {
607 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
608 };
609
Akira Hatanakab11ef082015-11-14 06:35:56 +0000610 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000611 const MCInstrInfo &MII,
612 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000613 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Matt Arsenault68802d32015-11-05 03:11:27 +0000614 ForcedEncodingSize(0) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000615 MCAsmParserExtension::Initialize(Parser);
616
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000617 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000618 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000619 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000620 }
621
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000622 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000623 }
624
Tom Stellard347ac792015-06-26 21:15:07 +0000625 AMDGPUTargetStreamer &getTargetStreamer() {
626 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
627 return static_cast<AMDGPUTargetStreamer &>(TS);
628 }
629
Tom Stellard45bb48e2015-06-13 03:28:10 +0000630 unsigned getForcedEncodingSize() const {
631 return ForcedEncodingSize;
632 }
633
634 void setForcedEncodingSize(unsigned Size) {
635 ForcedEncodingSize = Size;
636 }
637
638 bool isForcedVOP3() const {
639 return ForcedEncodingSize == 64;
640 }
641
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000642 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000643 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
644 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
645 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
646 OperandVector &Operands, MCStreamer &Out,
647 uint64_t &ErrorInfo,
648 bool MatchingInlineAsm) override;
649 bool ParseDirective(AsmToken DirectiveID) override;
650 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
651 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
652 SMLoc NameLoc, OperandVector &Operands) override;
653
654 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000655 int64_t Default = 0, bool AddDefault = false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000656 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
657 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000658 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
659 int64_t Default = 0, bool AddDefault = false,
660 bool (*ConvertResult)(int64_t&) = 0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000661 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
662 enum AMDGPUOperand::ImmTy ImmTy =
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000663 AMDGPUOperand::ImmTyNone,
664 bool AddDefault = false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000665 OperandMatchResultTy parseOptionalOps(
666 const ArrayRef<OptionalOperand> &OptionalOps,
667 OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000668 OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000669
Sam Kolton1bdcef72016-05-23 09:59:02 +0000670 OperandMatchResultTy parseImm(OperandVector &Operands);
671 OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
672 OperandMatchResultTy parseRegOrImmWithInputMods(OperandVector &Operands);
673
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000674 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault);
675 OperandMatchResultTy parseAMDGPUOperand(OperandVector &Operands, StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000676
677 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
678 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000679
680 bool parseCnt(int64_t &IntVal);
681 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000682 bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
683 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000684private:
685 struct OperandInfoTy {
686 int64_t Id;
687 bool IsSymbolic;
688 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
689 };
690 bool parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
691public:
692 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000693 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000694 AMDGPUOperand::Ptr defaultHwreg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000695
Artem Tamazov8ce1f712016-05-19 12:22:39 +0000696 void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
697 void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
698 void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
Sam Kolton5f10a132016-05-06 11:31:17 +0000699 AMDGPUOperand::Ptr defaultMubufOffset() const;
700 AMDGPUOperand::Ptr defaultGLC() const;
701 AMDGPUOperand::Ptr defaultSLC() const;
702 AMDGPUOperand::Ptr defaultTFE() const;
703
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000704 OperandMatchResultTy parseOModSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "omod"); }
705 OperandMatchResultTy parseClampSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "clamp"); }
706 OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); }
707 OperandMatchResultTy parseSMRDLiteralOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_literal_offset"); }
708 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "dpp_ctrl"); }
709 OperandMatchResultTy parseRowMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "row_mask"); }
710 OperandMatchResultTy parseBankMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bank_mask"); }
711 OperandMatchResultTy parseBoundCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bound_ctrl"); }
712 OperandMatchResultTy parseOffen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offen"); }
713 OperandMatchResultTy parseIdxen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "idxen"); }
714 OperandMatchResultTy parseAddr64(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "addr64"); }
715 OperandMatchResultTy parseOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset"); }
716 OperandMatchResultTy parseOffset0(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset0"); }
717 OperandMatchResultTy parseOffset1(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset1"); }
718 OperandMatchResultTy parseGLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "glc"); }
719 OperandMatchResultTy parseSLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "slc"); }
720 OperandMatchResultTy parseTFE(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "tfe"); }
721 OperandMatchResultTy parseGDS(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "gds"); }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000722
723 OperandMatchResultTy parseDMask(OperandVector &Operands);
724 OperandMatchResultTy parseUNorm(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000725 OperandMatchResultTy parseDA(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000726 OperandMatchResultTy parseR128(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000727 OperandMatchResultTy parseLWE(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000728 AMDGPUOperand::Ptr defaultDMask() const;
729 AMDGPUOperand::Ptr defaultUNorm() const;
730 AMDGPUOperand::Ptr defaultDA() const;
731 AMDGPUOperand::Ptr defaultR128() const;
732 AMDGPUOperand::Ptr defaultLWE() const;
733 AMDGPUOperand::Ptr defaultSMRDOffset() const;
734 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
735
736 AMDGPUOperand::Ptr defaultClampSI() const;
737 AMDGPUOperand::Ptr defaultOModSI() const;
738
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000739 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
740
Tom Stellarda90b9522016-02-11 03:28:15 +0000741 void cvtId(MCInst &Inst, const OperandVector &Operands);
742 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000743 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000744
745 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000746 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000747
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000748 OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault);
Sam Kolton5f10a132016-05-06 11:31:17 +0000749 AMDGPUOperand::Ptr defaultRowMask() const;
750 AMDGPUOperand::Ptr defaultBankMask() const;
751 AMDGPUOperand::Ptr defaultBoundCtrl() const;
752 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000753
754 OperandMatchResultTy parseSDWASel(OperandVector &Operands);
755 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000756 AMDGPUOperand::Ptr defaultSDWASel() const;
757 AMDGPUOperand::Ptr defaultSDWADstUnused() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000758};
759
// Describes one optional instruction operand: how it is spelled, how it is
// stored, and how an omitted occurrence is defaulted.
struct OptionalOperand {
  const char *Name; // keyword as written in assembly (e.g. "offset", "glc")
  AMDGPUOperand::ImmTy Type; // immediate type used when the operand is added
  bool IsBit; // presumably true for single-bit flags ("glc" vs "offset:N") — TODO confirm at use sites
  int64_t Default; // value used when the operand is omitted
  bool (*ConvertResult)(int64_t&); // optional post-parse fixup; may be null
};
767
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000768}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000769
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000770static int getRegClass(RegisterKind Is, unsigned RegWidth) {
771 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000772 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000773 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000774 case 1: return AMDGPU::VGPR_32RegClassID;
775 case 2: return AMDGPU::VReg_64RegClassID;
776 case 3: return AMDGPU::VReg_96RegClassID;
777 case 4: return AMDGPU::VReg_128RegClassID;
778 case 8: return AMDGPU::VReg_256RegClassID;
779 case 16: return AMDGPU::VReg_512RegClassID;
780 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000781 } else if (Is == IS_TTMP) {
782 switch (RegWidth) {
783 default: return -1;
784 case 1: return AMDGPU::TTMP_32RegClassID;
785 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000786 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000787 }
788 } else if (Is == IS_SGPR) {
789 switch (RegWidth) {
790 default: return -1;
791 case 1: return AMDGPU::SGPR_32RegClassID;
792 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000793 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000794 case 8: return AMDGPU::SReg_256RegClassID;
795 case 16: return AMDGPU::SReg_512RegClassID;
796 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000797 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000798 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000799}
800
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000801static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000802 return StringSwitch<unsigned>(RegName)
803 .Case("exec", AMDGPU::EXEC)
804 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000805 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000806 .Case("m0", AMDGPU::M0)
807 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000808 .Case("tba", AMDGPU::TBA)
809 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000810 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
811 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000812 .Case("vcc_lo", AMDGPU::VCC_LO)
813 .Case("vcc_hi", AMDGPU::VCC_HI)
814 .Case("exec_lo", AMDGPU::EXEC_LO)
815 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000816 .Case("tma_lo", AMDGPU::TMA_LO)
817 .Case("tma_hi", AMDGPU::TMA_HI)
818 .Case("tba_lo", AMDGPU::TBA_LO)
819 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000820 .Default(0);
821}
822
823bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000824 auto R = parseRegister();
825 if (!R) return true;
826 assert(R->isReg());
827 RegNo = R->getReg();
828 StartLoc = R->getStartLoc();
829 EndLoc = R->getEndLoc();
830 return false;
831}
832
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000833bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
834{
835 switch (RegKind) {
836 case IS_SPECIAL:
837 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
838 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
839 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
840 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
841 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
842 return false;
843 case IS_VGPR:
844 case IS_SGPR:
845 case IS_TTMP:
846 if (Reg1 != Reg + RegWidth) { return false; }
847 RegWidth++;
848 return true;
849 default:
850 assert(false); return false;
851 }
852}
853
// Parse one register reference from the token stream and report its kind,
// resolved MC register, starting register number and width (in 32-bit
// registers). Returns false on any syntax or range error. Accepted forms:
//   named special registers ("vcc", "exec", "flat_scratch", ...),
//   single registers ("v0", "s7", "ttmp3"),
//   ranges ("v[8:11]"), and bracketed lists ("[s0,s1,s2,s3]").
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // The leading letter (or "ttmp" prefix) selects the register file; the
      // rest of the identifier, if any, is the decimal register number.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY].
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        if (getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

        if (getLexer().isNot(AsmToken::RBrac)) { return false; }
        Parser.Lex();

        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        // Each element must be a single register of the same kind and must
        // extend the span collected so far.
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Resolve (kind, number, width) to an actual MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    // The first register of the span must be aligned; the class indexes
    // spans, so divide the register number by the alignment granule.
    if (RegNum % Size != 0) { return false; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  // Reject registers that do not exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}
949
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000950std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000951 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000952 SMLoc StartLoc = Tok.getLoc();
953 SMLoc EndLoc = Tok.getEndLoc();
Matt Arsenault3b159672015-12-01 20:31:08 +0000954 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
955
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000956 RegisterKind RegKind;
957 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000958
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000959 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
960 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000961 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000962 return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000963 TRI, &getSTI(), false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000964}
965
Sam Kolton1bdcef72016-05-23 09:59:02 +0000966AMDGPUAsmParser::OperandMatchResultTy
967AMDGPUAsmParser::parseImm(OperandVector &Operands) {
968 bool Minus = false;
969 if (getLexer().getKind() == AsmToken::Minus) {
970 Minus = true;
971 Parser.Lex();
972 }
973
974 SMLoc S = Parser.getTok().getLoc();
975 switch(getLexer().getKind()) {
976 case AsmToken::Integer: {
977 int64_t IntVal;
978 if (getParser().parseAbsoluteExpression(IntVal))
979 return MatchOperand_ParseFail;
980 if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
981 Error(S, "invalid immediate: only 32-bit values are legal");
982 return MatchOperand_ParseFail;
983 }
984
985 if (Minus)
986 IntVal *= -1;
987 Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
988 return MatchOperand_Success;
989 }
990 case AsmToken::Real: {
991 // FIXME: We should emit an error if a double precisions floating-point
992 // value is used. I'm not sure the best way to detect this.
993 int64_t IntVal;
994 if (getParser().parseAbsoluteExpression(IntVal))
995 return MatchOperand_ParseFail;
996
997 APFloat F((float)BitsToDouble(IntVal));
998 if (Minus)
999 F.changeSign();
1000 Operands.push_back(
1001 AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S,
1002 AMDGPUOperand::ImmTyNone, true));
1003 return MatchOperand_Success;
1004 }
1005 default:
1006 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1007 }
1008}
1009
1010AMDGPUAsmParser::OperandMatchResultTy
1011AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1012 auto res = parseImm(Operands);
1013 if (res != MatchOperand_NoMatch) {
1014 return res;
1015 }
1016
1017 if (auto R = parseRegister()) {
1018 assert(R->isReg());
1019 R->Reg.IsForcedVOP3 = isForcedVOP3();
1020 Operands.push_back(std::move(R));
1021 return MatchOperand_Success;
1022 }
1023 return MatchOperand_ParseFail;
1024}
1025
// Parse a register or immediate together with optional VOP input modifiers:
// a leading '-' (negate) and either "abs(...)" or "|...|" (absolute value).
// Modifier bits set on the operand: 0x1 = neg, 0x2 = abs.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(" form.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // "|...|" form; mutually exclusive with "abs(...)".
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  unsigned Modifiers = 0;
  if (Negate) {
    Modifiers |= 0x1;
  }
  if (Abs) {
    // Expect the closing '|'.
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2;
  }
  if (Abs2) {
    // Expect the closing ')'.
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Modifiers |= 0x2;
  }

  if (Modifiers) {
    // Attach the modifier bits to the operand parseRegOrImm() just pushed.
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Modifiers);
  }
  return MatchOperand_Success;
}
1089
1090
Tom Stellard45bb48e2015-06-13 03:28:10 +00001091unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
1092
1093 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1094
1095 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
1096 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
1097 return Match_InvalidOperand;
1098
Tom Stellard88e0b252015-10-06 15:57:53 +00001099 if ((TSFlags & SIInstrFlags::VOP3) &&
1100 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
1101 getForcedEncodingSize() != 64)
1102 return Match_PreferE32;
1103
Tom Stellard45bb48e2015-06-13 03:28:10 +00001104 return Match_Success;
1105}
1106
1107
// Match the parsed operand list against the instruction tables and, on
// success, emit the instruction to the streamer. Returns true (with a
// diagnostic) on any failure.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
  // Remove dummies prior matching. Iterate backwards because vector::erase()
  // invalidates all iterators which refer after erase point.
  for (auto I = Operands.rbegin(), E = Operands.rend(); I != E; ) {
    auto X = I++;
    if (static_cast<AMDGPUOperand*>(X->get())->isDummy()) {
      // X.base() points one past the element X refers to.
      Operands.erase(X.base() -1);
    }
  }
#endif

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, getSTI());
      return false;
    case Match_MissingFeature:
      return Error(IDLoc, "instruction not supported on this GPU");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      // Point the diagnostic at the offending operand when it is known.
      SMLoc ErrorLoc = IDLoc;
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size()) {
          return Error(IDLoc, "too few operands for instruction");
        }
        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
    case Match_PreferE32:
      return Error(IDLoc, "internal error: instruction without _e64 suffix "
                   "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1156
Tom Stellard347ac792015-06-26 21:15:07 +00001157bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1158 uint32_t &Minor) {
1159 if (getLexer().isNot(AsmToken::Integer))
1160 return TokError("invalid major version");
1161
1162 Major = getLexer().getTok().getIntVal();
1163 Lex();
1164
1165 if (getLexer().isNot(AsmToken::Comma))
1166 return TokError("minor version number required, comma expected");
1167 Lex();
1168
1169 if (getLexer().isNot(AsmToken::Integer))
1170 return TokError("invalid minor version");
1171
1172 Minor = getLexer().getTok().getIntVal();
1173 Lex();
1174
1175 return false;
1176}
1177
1178bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1179
1180 uint32_t Major;
1181 uint32_t Minor;
1182
1183 if (ParseDirectiveMajorMinor(Major, Minor))
1184 return true;
1185
1186 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1187 return false;
1188}
1189
// Handle ".hsa_code_object_isa", either bare (use the targeted GPU's ISA
// version) or with the explicit five-argument form:
//   <major>, <minor>, <stepping>, "<vendor>", "<arch>"
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1246
Tom Stellardff7416b2015-06-26 21:58:31 +00001247bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1248 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001249 SmallString<40> ErrStr;
1250 raw_svector_ostream Err(ErrStr);
1251 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
1252 return TokError(Err.str());
1253 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001254 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001255 return false;
1256}
1257
// Handle ".amd_kernel_code_t": one field assignment per line, terminated by
// ".end_amd_kernel_code_t". The header starts from target defaults, so only
// overridden fields need to appear.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Each entry must start on its own line.
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1290
Tom Stellarde135ffd2015-09-25 21:41:28 +00001291bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1292 getParser().getStreamer().SwitchSection(
1293 AMDGPU::getHSATextSection(getContext()));
1294 return false;
1295}
1296
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001297bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1298 if (getLexer().isNot(AsmToken::Identifier))
1299 return TokError("expected symbol name");
1300
1301 StringRef KernelName = Parser.getTok().getString();
1302
1303 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1304 ELF::STT_AMDGPU_HSA_KERNEL);
1305 Lex();
1306 return false;
1307}
1308
Tom Stellard00f2f912015-12-02 19:47:57 +00001309bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1310 if (getLexer().isNot(AsmToken::Identifier))
1311 return TokError("expected symbol name");
1312
1313 StringRef GlobalName = Parser.getTok().getIdentifier();
1314
1315 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1316 Lex();
1317 return false;
1318}
1319
1320bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1321 if (getLexer().isNot(AsmToken::Identifier))
1322 return TokError("expected symbol name");
1323
1324 StringRef GlobalName = Parser.getTok().getIdentifier();
1325
1326 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1327 Lex();
1328 return false;
1329}
1330
1331bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1332 getParser().getStreamer().SwitchSection(
1333 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1334 return false;
1335}
1336
1337bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1338 getParser().getStreamer().SwitchSection(
1339 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1340 return false;
1341}
1342
Tom Stellard9760f032015-12-03 03:34:32 +00001343bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1344 getParser().getStreamer().SwitchSection(
1345 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1346 return false;
1347}
1348
// Dispatch AMDGPU-specific assembler directives to their handlers.
// Returns true when the directive is not recognized here (so the generic
// parser continues); handlers return true on parse errors, which propagates.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}
1384
Matt Arsenault68802d32015-11-05 03:11:27 +00001385bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1386 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001387 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001388 return true;
1389
Matt Arsenault3b159672015-12-01 20:31:08 +00001390 if (isSI()) {
1391 // No flat_scr
1392 switch (RegNo) {
1393 case AMDGPU::FLAT_SCR:
1394 case AMDGPU::FLAT_SCR_LO:
1395 case AMDGPU::FLAT_SCR_HI:
1396 return false;
1397 default:
1398 return true;
1399 }
1400 }
1401
Matt Arsenault68802d32015-11-05 03:11:27 +00001402 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1403 // SI/CI have.
1404 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1405 R.isValid(); ++R) {
1406 if (*R == RegNo)
1407 return false;
1408 }
1409
1410 return true;
1411}
1412
// Parse a single instruction operand: first the tablegen'd custom parsers
// for Mnemonic, then a generic register-or-immediate, and finally a bare
// identifier token.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  // Fall back to a plain identifier token (e.g. symbolic operands).
  if (getLexer().getKind() == AsmToken::Identifier) {
    const auto &Tok = Parser.getTok();
    Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1442
// Build the operand list for one instruction: the mnemonic token first, then
// each parsed operand. An explicit _e32/_e64 suffix forces the encoding size
// and is stripped before operand parsing. On failure the rest of the
// statement is consumed so the parser can resynchronize on the next line.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  // Strip the encoding suffix so operand parsers see the base mnemonic.
  if (Name.endswith("_e64")) { Name = Name.substr(0, Name.size() - 4); }
  if (Name.endswith("_e32")) { Name = Name.substr(0, Name.size() - 4); }

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      // Skip to the end of the statement to recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      // Skip to the end of the statement to recover.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
1488
1489//===----------------------------------------------------------------------===//
1490// Utility functions
1491//===----------------------------------------------------------------------===//
1492
// Parse "<Prefix>:<integer>" into Int. At end-of-statement — or, when
// AddDefault is set, on any non-matching identifier — Int receives Default
// and the parse succeeds without consuming input.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default, bool AddDefault) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Identifier: {
      StringRef Name = Parser.getTok().getString();
      if (!Name.equals(Prefix)) {
        if (AddDefault) {
          Int = Default;
          return MatchOperand_Success;
        }
        return MatchOperand_NoMatch;
      }

      // Consume "<Prefix>" ":" "<integer>".
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Colon))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;

      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;
      break;
    }
  }
  return MatchOperand_Success;
}
1530
// Parse "<Prefix>:<integer>" and append it to Operands as an immediate of
// type ImmTy. ConvertResult, when supplied, may rewrite the raw value and
// can reject it by returning false.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy,
                                    int64_t Default, bool AddDefault,
                                    bool (*ConvertResult)(int64_t&)) {

  SMLoc S = Parser.getTok().getLoc();
  int64_t Value = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value, Default, AddDefault);
  if (Res != MatchOperand_Success)
    return Res;

  if (ConvertResult && !ConvertResult(Value)) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));

#ifdef WORKAROUND_USE_DUMMY_OPERANDS_INSTEAD_MUTIPLE_DEFAULT_OPERANDS
  if (Value == Default && AddDefault) {
    // Reverse lookup in previously added operands (skip just added one)
    // for the first non-dummy operand. If it is of the same type,
    // then replace just added default operand with dummy.
    for (auto I = Operands.rbegin(), E = Operands.rend(); I != E; ++I) {
      if (I == Operands.rbegin())
        continue;
      if (static_cast<AMDGPUOperand*>(I->get())->isDummy())
        continue;
      if (static_cast<AMDGPUOperand*>(I->get())->isImmTy(ImmTy)) {
        Operands.pop_back();
        Operands.push_back(AMDGPUOperand::CreateDummy(S)); // invalidates iterators
        break;
      }
    }
  }
#endif
  return MatchOperand_Success;
}
1570
1571AMDGPUAsmParser::OperandMatchResultTy
1572AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001573 enum AMDGPUOperand::ImmTy ImmTy,
1574 bool AddDefault) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001575 int64_t Bit = 0;
1576 SMLoc S = Parser.getTok().getLoc();
1577
1578 // We are at the end of the statement, and this is a default argument, so
1579 // use a default value.
1580 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1581 switch(getLexer().getKind()) {
1582 case AsmToken::Identifier: {
1583 StringRef Tok = Parser.getTok().getString();
1584 if (Tok == Name) {
1585 Bit = 1;
1586 Parser.Lex();
1587 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1588 Bit = 0;
1589 Parser.Lex();
1590 } else {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001591 if (AddDefault) {
1592 Bit = 0;
1593 } else {
1594 return MatchOperand_NoMatch;
1595 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001596 }
1597 break;
1598 }
1599 default:
1600 return MatchOperand_NoMatch;
1601 }
1602 }
1603
1604 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1605 return MatchOperand_Success;
1606}
1607
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001608typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1609
Sam Koltona74cd522016-03-18 15:35:51 +00001610void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1611 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001612 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001613 auto i = OptionalIdx.find(ImmT);
1614 if (i != OptionalIdx.end()) {
1615 unsigned Idx = i->second;
1616 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1617 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001618 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001619 }
1620}
1621
Tom Stellard45bb48e2015-06-13 03:28:10 +00001622static bool operandsHasOptionalOp(const OperandVector &Operands,
1623 const OptionalOperand &OOp) {
1624 for (unsigned i = 0; i < Operands.size(); i++) {
1625 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1626 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1627 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1628 return true;
1629
1630 }
1631 return false;
1632}
1633
/// Try each candidate in \p OptionalOps (skipping ones already parsed) until
/// one matches at the current token. Bit operands delegate to parseNamedBit;
/// integer operands to parseIntWithPrefix. A successfully parsed operand is
/// appended only when it differs from its default value.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    // Each optional operand may appear at most once.
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue; // Not this operand; try the next candidate.
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    // Record default-ness BEFORE conversion, which may re-encode the value.
    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    // Default-valued operands are omitted; the converter fills them in later.
    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1671
Sam Kolton3025e7f2016-04-26 13:33:56 +00001672AMDGPUAsmParser::OperandMatchResultTy
1673AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1674 if (getLexer().isNot(AsmToken::Identifier)) {
1675 return MatchOperand_NoMatch;
1676 }
1677 StringRef Tok = Parser.getTok().getString();
1678 if (Tok != Prefix) {
1679 return MatchOperand_NoMatch;
1680 }
1681
1682 Parser.Lex();
1683 if (getLexer().isNot(AsmToken::Colon)) {
1684 return MatchOperand_ParseFail;
1685 }
1686
1687 Parser.Lex();
1688 if (getLexer().isNot(AsmToken::Identifier)) {
1689 return MatchOperand_ParseFail;
1690 }
1691
1692 Value = Parser.getTok().getString();
1693 return MatchOperand_Success;
1694}
1695
Tom Stellard45bb48e2015-06-13 03:28:10 +00001696//===----------------------------------------------------------------------===//
1697// ds
1698//===----------------------------------------------------------------------===//
1699
/// Build an MCInst for DS instructions using the two-offset (offset0/offset1)
/// form. Registers are emitted in parse order; offset0, offset1 and gds are
/// filled from the parsed operands or default to 0; m0 is always appended.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments: remember where each immediate type was parsed.
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
1724
1725void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1726
1727 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1728 bool GDSOnly = false;
1729
1730 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1731 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1732
1733 // Add the register arguments
1734 if (Op.isReg()) {
1735 Op.addRegOperands(Inst, 1);
1736 continue;
1737 }
1738
1739 if (Op.isToken() && Op.getToken() == "gds") {
1740 GDSOnly = true;
1741 continue;
1742 }
1743
1744 // Handle optional arguments
1745 OptionalIdx[Op.getImmTy()] = i;
1746 }
1747
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001748 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1749 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001750
1751 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001752 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001753 }
1754 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1755}
1756
1757
1758//===----------------------------------------------------------------------===//
1759// s_waitcnt
1760//===----------------------------------------------------------------------===//
1761
/// Parse one "name(value)" component of an s_waitcnt expression and merge it
/// into \p IntVal at the counter's bit position. Returns true on error
/// (parser convention in this file: true == failure).
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Components may be joined with '&' or ',' — skip the separator.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  // Field layout within the s_waitcnt immediate:
  //   vmcnt   [3:0], expcnt [6:4], lgkmcnt [11:8]
  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  // Clear the counter's field, then insert the new value.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
1804
/// Parse the s_waitcnt operand: either a raw integer immediate or one or more
/// symbolic "name(value)" components folded together by parseCnt.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f; // All fields at max == "wait for nothing".
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // Symbolic form: each parseCnt call consumes one component (and any
    // separator) until the end of the statement.
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}
1832
/// Parse the body of a "hwreg(reg[, offset, width])" expression.
/// On success returns false and fills \p HwRegCode, \p Offset, \p Width;
/// \p IsIdentifier records whether the register was given symbolically
/// (affects diagnostics in the caller). Returns true on parse error.
bool AMDGPUAsmParser::parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier) {
  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    IsIdentifier = true;
    // NOTE(review): StringSwitch<unsigned> with Default(-1) widens to
    // 0xFFFFFFFF when stored into the int64_t; the caller's range check
    // (HwRegCode > 63) still rejects it — confirm this is intentional.
    HwRegCode = StringSwitch<unsigned>(Parser.getTok().getString())
      .Case("HW_REG_MODE" , 1)
      .Case("HW_REG_STATUS" , 2)
      .Case("HW_REG_TRAPSTS" , 3)
      .Case("HW_REG_HW_ID" , 4)
      .Case("HW_REG_GPR_ALLOC", 5)
      .Case("HW_REG_LDS_ALLOC", 6)
      .Case("HW_REG_IB_STS" , 7)
      .Default(-1);
    Parser.Lex();
  } else {
    IsIdentifier = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwRegCode))
      return true;
  }

  // Short form: "hwreg(reg)" — offset/width keep the caller's defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
1892
/// Parse an s_getreg/s_setreg hwreg operand: either a raw 16-bit immediate or
/// a symbolic "hwreg(...)" expression, encoded as
///   HwRegCode(6) [5:0] | Offset(5) [10:6] | Width-1 (5) [15:11].
/// Out-of-range fields report an error but still produce an operand so that
/// parsing can continue without cascading diagnostics.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_ParseFail;
      if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        bool IsIdentifier = false;
        int64_t HwRegCode = -1;
        int64_t Offset = 0; // default
        int64_t Width = 32; // default
        if (parseHwregOperand(HwRegCode, Offset, Width, IsIdentifier))
          return MatchOperand_ParseFail;
        // HwRegCode (6) [5:0]
        // Offset (5) [10:6]
        // WidthMinusOne (5) [15:11]
        if (HwRegCode < 0 || HwRegCode > 63) {
          if (IsIdentifier)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || Offset > 31)
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if (Width < 1 || Width > 32)
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        Imm16Val = HwRegCode | (Offset << 6) | ((Width-1) << 11);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
1938
// Any parsed immediate is accepted as an s_waitcnt operand.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
1942
// True iff this operand was parsed as a hwreg immediate.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
1946
// Default hwreg operand: all encoded fields zero.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
}
1950
/// Parse the body of a "sendmsg(msg[, op[, stream]])" expression.
/// Fills \p Msg, \p Operation and \p StreamId; each Id may remain *_UNKNOWN_
/// when the text was syntactically valid but unrecognized (the caller emits
/// the diagnostics). Returns true only on hard syntax errors.
bool AMDGPUAsmParser::parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic message id: linear search over the known names.
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const std::string tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    // NOTE(review): a second trailing integer invalidates the id rather than
    // failing the parse — presumably to defer diagnostics to the caller;
    // confirm against the callers before changing.
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  // Messages without an operation field must close immediately.
  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic operation id: the name table depends on the message kind.
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const std::string Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}
2046
/// Parse an s_sendmsg operand: a raw 16-bit immediate or a symbolic
/// "sendmsg(...)" expression. Field validation reports errors via Error()
/// but still emits an operand so later operands keep parsing cleanly.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsg(Msg, Operation, StreamId))
        return MatchOperand_NoMatch;
      // do/while(0) gives a single exit point for the validation cascade.
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = Msg.Id;
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (0);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2125
// True iff this operand was parsed as a sendmsg immediate.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
2129
Tom Stellard45bb48e2015-06-13 03:28:10 +00002130//===----------------------------------------------------------------------===//
2131// sopp branch targets
2132//===----------------------------------------------------------------------===//
2133
2134AMDGPUAsmParser::OperandMatchResultTy
2135AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2136 SMLoc S = Parser.getTok().getLoc();
2137
2138 switch (getLexer().getKind()) {
2139 default: return MatchOperand_ParseFail;
2140 case AsmToken::Integer: {
2141 int64_t Imm;
2142 if (getParser().parseAbsoluteExpression(Imm))
2143 return MatchOperand_ParseFail;
2144 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
2145 return MatchOperand_Success;
2146 }
2147
2148 case AsmToken::Identifier:
2149 Operands.push_back(AMDGPUOperand::CreateExpr(
2150 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2151 Parser.getTok().getString()), getContext()), S));
2152 Parser.Lex();
2153 return MatchOperand_Success;
2154 }
2155}
2156
2157//===----------------------------------------------------------------------===//
2158// flat
2159//===----------------------------------------------------------------------===//
2160
Tom Stellard45bb48e2015-06-13 03:28:10 +00002161//===----------------------------------------------------------------------===//
2162// mubuf
2163//===----------------------------------------------------------------------===//
2164
// MUBUF offsets are unsigned 12-bit immediates.
bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}
2168
// Default MUBUF offset operand: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2172
// Default glc bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}
2176
// Default slc bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
}
2180
// Default tfe bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
2184
/// Shared MCInst conversion for MUBUF loads/stores/atomics.
/// \p IsAtomic suppresses the glc operand (hard-coded in the atomic asm
/// string); \p IsAtomicReturn additionally duplicates $vdata_in as $vdata
/// for the RTN atomic encodings (and requires IsAtomic).
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true); // IsAtomicReturn implies IsAtomic.

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2230
2231//===----------------------------------------------------------------------===//
2232// mimg
2233//===----------------------------------------------------------------------===//
2234
// MIMG dmask: "dmask:<int>" integer operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}
2239
// MIMG unorm: named single-bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}
2244
// MIMG da: named single-bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}
2249
// MIMG r128: named single-bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}
2254
// MIMG lwe: named single-bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
2259
/// Convert parsed MIMG operands to an MCInst: defs first, then remaining
/// register/immediate operands in order, then the optional modifiers in the
/// fixed encoding order (defaulting to 0 when absent).
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Emit one register operand per instruction def.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false); // Only reg/imm and modifier operands are expected here.
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2292
/// Convert parsed MIMG atomic operands to an MCInst. Same layout as cvtMIMG,
/// except the first non-def operand is emitted twice: once as the tied source
/// (vdata-in) and again via the main loop.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst (note: I is intentionally not advanced, so this
  // operand is re-processed by the loop below).
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false); // Only reg/imm and modifier operands are expected here.
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2328
// Default dmask: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}
2332
// Default unorm bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}
2336
// Default da bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
}
2340
// Default r128 bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
}
2344
// Default lwe bit: off.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
2348
Tom Stellard45bb48e2015-06-13 03:28:10 +00002349//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002350// smrd
2351//===----------------------------------------------------------------------===//
2352
// SMRD offsets are 8-bit unsigned immediates.
bool AMDGPUOperand::isSMRDOffset() const {

  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}
2359
bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
2365
// Default SMRD offset operand: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2369
// Default SMRD literal-offset operand: 0.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
2373
Tom Stellard217361c2015-08-06 19:28:38 +00002374//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002375// vop3
2376//===----------------------------------------------------------------------===//
2377
/// Re-encode an omod "mul" factor in place: 1 -> 0, 2 -> 1, 4 -> 2.
/// Returns false (leaving Mul untouched) for any other factor.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
2385
/// Re-encode an omod "div" factor in place: 1 -> 0, 2 -> 3.
/// Returns false (leaving Div untouched) for any other factor.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2399
/// Re-encode a DPP bound_ctrl value in place: 0 -> 1, -1 -> 0.
/// Returns false (leaving BoundCtrl untouched) for any other value.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2410
// Note: the order in this table matches the order of operands in AsmString.
// Columns: {asm name, immediate type, is-single-bit flag, default value,
//           optional converter applied to the parsed value}.
static const OptionalOperand AMDGPUOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, 0, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, 0, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, 0, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, 0, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, 0, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, 1, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, 0, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA, true, 0, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128, true, 0, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE, true, 0, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, 0, nullptr},
  {"dpp_ctrl",  AMDGPUOperand::ImmTyDppCtrl, false, -1, nullptr},
  {"row_mask",  AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, ConvertBoundCtrl},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002433
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002434AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault)
2435{
2436 if (Op.IsBit) {
2437 return parseNamedBit(Op.Name, Operands, Op.Type, AddDefault);
2438 } else if (Op.Type == AMDGPUOperand::ImmTyDppCtrl) {
2439 return parseDPPCtrlOps(Operands, AddDefault);
2440 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2441 return parseOModOperand(Operands);
2442 } else {
2443 return parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.Default, AddDefault, Op.ConvertResult);
2444 }
2445}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002446
// Try to parse the optional operand named Name (an empty Name accepts any
// table entry). Walks AMDGPUOperandTable in AsmString order: if the current
// token is exactly the expected operand it is parsed and consumed; if the
// token belongs to an operand that appears later in the table, a default
// value may be pushed for the expected operand so matching can continue.
AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseAMDGPUOperand(OperandVector &Operands, StringRef Name)
{
  StringRef Tok;
  if (getLexer().is(AsmToken::Identifier)) {
    Tok = Parser.getTok().getString();
  }
  // "optional" records whether the current token is itself some optional
  // operand ("mul"/"div" are the omod spellings, which are not table names).
  bool optional = false;
  if (Tok == "mul" || Tok == "div") { optional = true; }
  for (const OptionalOperand &Op1 : AMDGPUOperandTable) {
    if (Op1.Name == Tok) { optional = true; break; }
  }
  // Attempt to parse current optional operand.
  for (const OptionalOperand &Op : AMDGPUOperandTable) {
    // TODO: For now, omod is handled separately because
    // token name does not match name in table.
    bool parseThis =
      Name == "" ||
      (Op.Name == Name) ||
      (Name == "omod" && Op.Type == AMDGPUOperand::ImmTyOModSI);
    if (parseThis && Tok == Name) {
      // Exactly the expected token for optional operand.
      // Parse it and add operand normally.
      return parseOptionalOperand(Operands, Op, true);
    } else if (parseThis) {
      // Token for optional operand which is later in the table
      // than the one we expect. If needed, add default value
      // for the operand we expect, do not consume anything
      // and return MatchOperand_NoMatch. Parsing will continue.
      return parseOptionalOperand(Operands, Op, optional);
    } else if (Op.Name == Tok) {
      // This looks like optional operand, but we do not expect it.
      // This is the case when AsmString has token in it.
      return MatchOperand_NoMatch;
    }
  }
  return MatchOperand_NoMatch;
}
2484
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002485AMDGPUAsmParser::OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands)
2486{
2487 StringRef Name = Parser.getTok().getString();
2488 if (Name == "mul") {
2489 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodMul);
2490 } else if (Name == "div") {
2491 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodDiv);
2492 } else {
2493 return MatchOperand_NoMatch;
2494 }
2495}
2496
Sam Kolton5f10a132016-05-06 11:31:17 +00002497AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
2498 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
2499}
2500
2501AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
2502 return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
2503}
2504
Tom Stellarda90b9522016-02-11 03:28:15 +00002505void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2506 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002507 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002508 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002509 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2510 }
2511 for (unsigned E = Operands.size(); I != E; ++I)
2512 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2513}
2514
2515void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002516 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2517 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002518 cvtVOP3(Inst, Operands);
2519 } else {
2520 cvtId(Inst, Operands);
2521 }
2522}
2523
Tom Stellarda90b9522016-02-11 03:28:15 +00002524void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002525 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00002526 unsigned I = 1;
2527 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002528 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002529 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00002530 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002531
Tom Stellarda90b9522016-02-11 03:28:15 +00002532 for (unsigned E = Operands.size(); I != E; ++I) {
2533 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00002534 if (Op.isRegOrImmWithInputMods()) {
2535 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002536 } else if (Op.isImm()) {
2537 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00002538 } else {
2539 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002540 }
Tom Stellarda90b9522016-02-11 03:28:15 +00002541 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002542
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002543 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2544 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002545}
2546
Sam Koltondfa29f72016-03-09 12:29:31 +00002547//===----------------------------------------------------------------------===//
2548// dpp
2549//===----------------------------------------------------------------------===//
2550
2551bool AMDGPUOperand::isDPPCtrl() const {
2552 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2553 if (result) {
2554 int64_t Imm = getImm();
2555 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2556 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2557 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2558 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2559 (Imm == 0x130) ||
2560 (Imm == 0x134) ||
2561 (Imm == 0x138) ||
2562 (Imm == 0x13c) ||
2563 (Imm == 0x140) ||
2564 (Imm == 0x141) ||
2565 (Imm == 0x142) ||
2566 (Imm == 0x143);
2567 }
2568 return false;
2569}
2570
// Parse a dpp_ctrl operand in any of its textual forms:
//   quad_perm:[a,b,c,d]         -> a | b<<2 | c<<4 | d<<6 (0x000..0x0ff)
//   row_shl/row_shr/row_ror:N   -> 0x100/0x110/0x120 | N
//   wave_shl/rol/shr/ror:N      -> 0x130/0x134/0x138/0x13C
//   row_mirror / row_half_mirror-> 0x140 / 0x141
//   row_bcast:15 / row_bcast:31 -> 0x142 / 0x143
// If the current token is not a dpp_ctrl spelling and AddDefault is set, a
// default immediate of 0 is pushed instead and parsing succeeds.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands, bool AddDefault) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      // Not a dpp_ctrl spelling at all: either synthesize the default
      // operand (value 0) or report no match without consuming tokens.
      if (AddDefault) {
        Operands.push_back(AMDGPUOperand::CreateImm(0, S, AMDGPUOperand::ImmTyDppCtrl));
        return MatchOperand_Success;
      } else {
        return MatchOperand_NoMatch;
      }
    }

    // All remaining forms are "prefix:...".
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d] -- four 2-bit lane selects packed LSB-first.
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 2);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 4);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 6);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;

    } else {
      // sel:%d
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      if (Prefix == "row_shl") {
        Int |= 0x100;
      } else if (Prefix == "row_shr") {
        Int |= 0x110;
      } else if (Prefix == "row_ror") {
        Int |= 0x120;
      } else if (Prefix == "wave_shl") {
        Int = 0x130;
      } else if (Prefix == "wave_rol") {
        Int = 0x134;
      } else if (Prefix == "wave_shr") {
        Int = 0x138;
      } else if (Prefix == "wave_ror") {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        // NOTE(review): a row_bcast value other than 15/31 falls through
        // with Int unchanged and is accepted -- confirm whether it should
        // be rejected as a parse error instead.
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }
  Parser.Lex(); // eat last token

  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2687
Sam Kolton5f10a132016-05-06 11:31:17 +00002688AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2689 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002690}
2691
Sam Kolton5f10a132016-05-06 11:31:17 +00002692AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2693 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002694}
2695
Sam Kolton5f10a132016-05-06 11:31:17 +00002696AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2697 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2698}
2699
2700void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002701 OptionalImmIndexMap OptionalIdx;
2702
2703 unsigned I = 1;
2704 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2705 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2706 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2707 }
2708
2709 for (unsigned E = Operands.size(); I != E; ++I) {
2710 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2711 // Add the register arguments
Sam Kolton5f10a132016-05-06 11:31:17 +00002712 if (Op.isRegOrImmWithInputMods()) {
2713 // We convert only instructions with modifiers
Sam Koltondfa29f72016-03-09 12:29:31 +00002714 Op.addRegOrImmWithInputModsOperands(Inst, 2);
2715 } else if (Op.isDPPCtrl()) {
2716 Op.addImmOperands(Inst, 1);
2717 } else if (Op.isImm()) {
2718 // Handle optional arguments
2719 OptionalIdx[Op.getImmTy()] = I;
2720 } else {
2721 llvm_unreachable("Invalid operand type");
2722 }
2723 }
2724
2725 // ToDo: fix default values for row_mask and bank_mask
2726 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2727 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2728 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2729}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002730
Sam Kolton3025e7f2016-04-26 13:33:56 +00002731//===----------------------------------------------------------------------===//
2732// sdwa
2733//===----------------------------------------------------------------------===//
2734
2735AMDGPUAsmParser::OperandMatchResultTy
2736AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2737 SMLoc S = Parser.getTok().getLoc();
2738 StringRef Value;
2739 AMDGPUAsmParser::OperandMatchResultTy res;
2740
2741 res = parseStringWithPrefix("dst_sel", Value);
2742 if (res == MatchOperand_ParseFail) {
2743 return MatchOperand_ParseFail;
2744 } else if (res == MatchOperand_NoMatch) {
2745 res = parseStringWithPrefix("src0_sel", Value);
2746 if (res == MatchOperand_ParseFail) {
2747 return MatchOperand_ParseFail;
2748 } else if (res == MatchOperand_NoMatch) {
2749 res = parseStringWithPrefix("src1_sel", Value);
2750 if (res != MatchOperand_Success) {
2751 return res;
2752 }
2753 }
2754 }
2755
2756 int64_t Int;
2757 Int = StringSwitch<int64_t>(Value)
2758 .Case("BYTE_0", 0)
2759 .Case("BYTE_1", 1)
2760 .Case("BYTE_2", 2)
2761 .Case("BYTE_3", 3)
2762 .Case("WORD_0", 4)
2763 .Case("WORD_1", 5)
2764 .Case("DWORD", 6)
2765 .Default(0xffffffff);
2766 Parser.Lex(); // eat last token
2767
2768 if (Int == 0xffffffff) {
2769 return MatchOperand_ParseFail;
2770 }
2771
2772 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2773 AMDGPUOperand::ImmTySdwaSel));
2774 return MatchOperand_Success;
2775}
2776
2777AMDGPUAsmParser::OperandMatchResultTy
2778AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2779 SMLoc S = Parser.getTok().getLoc();
2780 StringRef Value;
2781 AMDGPUAsmParser::OperandMatchResultTy res;
2782
2783 res = parseStringWithPrefix("dst_unused", Value);
2784 if (res != MatchOperand_Success) {
2785 return res;
2786 }
2787
2788 int64_t Int;
2789 Int = StringSwitch<int64_t>(Value)
2790 .Case("UNUSED_PAD", 0)
2791 .Case("UNUSED_SEXT", 1)
2792 .Case("UNUSED_PRESERVE", 2)
2793 .Default(0xffffffff);
2794 Parser.Lex(); // eat last token
2795
2796 if (Int == 0xffffffff) {
2797 return MatchOperand_ParseFail;
2798 }
2799
2800 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2801 AMDGPUOperand::ImmTySdwaDstUnused));
2802 return MatchOperand_Success;
2803}
2804
Sam Kolton5f10a132016-05-06 11:31:17 +00002805AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
2806 return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
2807}
2808
2809AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
2810 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
2811}
2812
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002813
/// Force static initialization.
/// Registers this asm parser implementation for both AMDGPU targets so the
/// generic MC layer can construct it by target lookup.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
2819
2820#define GET_REGISTER_MATCHER
2821#define GET_MATCHER_IMPLEMENTATION
2822#include "AMDGPUGenAsmMatcher.inc"