Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000016#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "llvm/ADT/STLExtras.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000018#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/StringSwitch.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCParser/MCAsmLexer.h"
27#include "llvm/MC/MCParser/MCAsmParser.h"
28#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000029#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000033#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000034#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/Support/SourceMgr.h"
37#include "llvm/Support/TargetRegistry.h"
38#include "llvm/Support/raw_ostream.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039
Artem Tamazovebe71ce2016-05-06 17:48:48 +000040// FIXME ODR: Move this to some common place for AsmParser and InstPrinter
41namespace llvm {
42namespace AMDGPU {
43namespace SendMsg {
44
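// Note (illustrative): these tables back the symbolic spelling of the
// s_sendmsg operand, e.g. "s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)";
// nullptr entries are encodings that have no symbolic name.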
45// This must be in sync with llvm::AMDGPU::SendMsg::Id enum members.
46static
47const char* const IdSymbolic[] = {
48 nullptr,
49 "MSG_INTERRUPT",
50 "MSG_GS",
51 "MSG_GS_DONE",
52 nullptr,
53 nullptr,
54 nullptr,
55 nullptr,
56 nullptr,
57 nullptr,
58 nullptr,
59 nullptr,
60 nullptr,
61 nullptr,
62 nullptr,
63 "MSG_SYSMSG"
64};
65
66// These two must be in sync with llvm::AMDGPU::SendMsg::Op enum members.
67static
68const char* const OpSysSymbolic[] = {
69 nullptr,
70 "SYSMSG_OP_ECC_ERR_INTERRUPT",
71 "SYSMSG_OP_REG_RD",
72 "SYSMSG_OP_HOST_TRAP_ACK",
73 "SYSMSG_OP_TTRACE_PC"
74};
75
76static
77const char* const OpGsSymbolic[] = {
78 "GS_OP_NOP",
79 "GS_OP_CUT",
80 "GS_OP_EMIT",
81 "GS_OP_EMIT_CUT"
82};
83
84} // namespace SendMsg
85} // namespace AMDGPU
86} // namespace llvm
87
Tom Stellard45bb48e2015-06-13 03:28:10 +000088using namespace llvm;
89
90namespace {
91
92struct OptionalOperand;
93
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000094enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
95
Tom Stellard45bb48e2015-06-13 03:28:10 +000096class AMDGPUOperand : public MCParsedAsmOperand {
97 enum KindTy {
98 Token,
99 Immediate,
100 Register,
101 Expression
102 } Kind;
103
104 SMLoc StartLoc, EndLoc;
105
106public:
107 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
108
109 MCContext *Ctx;
110
Sam Kolton5f10a132016-05-06 11:31:17 +0000111 typedef std::unique_ptr<AMDGPUOperand> Ptr;
112
Tom Stellard45bb48e2015-06-13 03:28:10 +0000113 enum ImmTy {
114 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000115 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000116 ImmTyOffen,
117 ImmTyIdxen,
118 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000119 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000120 ImmTyOffset0,
121 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000122 ImmTyGLC,
123 ImmTySLC,
124 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000125 ImmTyClampSI,
126 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000127 ImmTyDppCtrl,
128 ImmTyDppRowMask,
129 ImmTyDppBankMask,
130 ImmTyDppBoundCtrl,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000131 ImmTySdwaSel,
132 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000133 ImmTyDMask,
134 ImmTyUNorm,
135 ImmTyDA,
136 ImmTyR128,
137 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +0000138 ImmTyHwreg,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000139 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000140 };
141
142 struct TokOp {
143 const char *Data;
144 unsigned Length;
145 };
146
147 struct ImmOp {
148 bool IsFPImm;
149 ImmTy Type;
150 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000151 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000152 };
153
154 struct RegOp {
155 unsigned RegNo;
156 int Modifiers;
157 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000158 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000159 bool IsForcedVOP3;
160 };
161
162 union {
163 TokOp Tok;
164 ImmOp Imm;
165 RegOp Reg;
166 const MCExpr *Expr;
167 };
168
169 void addImmOperands(MCInst &Inst, unsigned N) const {
170 Inst.addOperand(MCOperand::createImm(getImm()));
171 }
172
173 StringRef getToken() const {
174 return StringRef(Tok.Data, Tok.Length);
175 }
176
177 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000178 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000179 }
180
181 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000182 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000183 addRegOperands(Inst, N);
184 else
185 addImmOperands(Inst, N);
186 }
187
Tom Stellardd93a34f2016-02-22 19:17:56 +0000188 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
189 if (isRegKind()) {
190 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
191 addRegOperands(Inst, N);
192 } else {
193 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
194 addImmOperands(Inst, N);
195 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000196 }
197
198 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
199 if (isImm())
200 addImmOperands(Inst, N);
201 else {
202 assert(isExpr());
203 Inst.addOperand(MCOperand::createExpr(Expr));
204 }
205 }
206
Tom Stellard45bb48e2015-06-13 03:28:10 +0000207 bool isToken() const override {
208 return Kind == Token;
209 }
210
211 bool isImm() const override {
212 return Kind == Immediate;
213 }
214
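  // Inline constants, per the check below, are the integers -16..64 and the
  // float values 0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0 (1/(2*pi) for VI is
  // still a TODO); anything else must be emitted as a literal.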
Tom Stellardd93a34f2016-02-22 19:17:56 +0000215 bool isInlinableImm() const {
216 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
217 immediates are inlinable (e.g. "clamp" attribute is not) */ )
218 return false;
219 // TODO: We should avoid using host float here. It would be better to
Sam Koltona74cd522016-03-18 15:35:51 +0000220 // check the float bit values which is what a few other places do.
Tom Stellardd93a34f2016-02-22 19:17:56 +0000221 // We've had bot failures before due to weird NaN support on mips hosts.
222 const float F = BitsToFloat(Imm.Val);
223 // TODO: Add 1/(2*pi) for VI
224 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000225 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000226 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000227 }
228
Tom Stellard45bb48e2015-06-13 03:28:10 +0000229 int64_t getImm() const {
230 return Imm.Val;
231 }
232
233 enum ImmTy getImmTy() const {
234 assert(isImm());
235 return Imm.Type;
236 }
237
238 bool isRegKind() const {
239 return Kind == Register;
240 }
241
242 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000243 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000244 }
245
Tom Stellardd93a34f2016-02-22 19:17:56 +0000246 bool isRegOrImmWithInputMods() const {
247 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000248 }
249
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000250 bool isImmTy(ImmTy ImmT) const {
251 return isImm() && Imm.Type == ImmT;
252 }
253
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000254 bool isClampSI() const {
255 return isImmTy(ImmTyClampSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000256 }
257
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000258 bool isOModSI() const {
259 return isImmTy(ImmTyOModSI);
Tom Stellarda90b9522016-02-11 03:28:15 +0000260 }
261
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000262 bool isImmModifier() const {
263 return Kind == Immediate && Imm.Type != ImmTyNone;
264 }
265
266 bool isDMask() const {
267 return isImmTy(ImmTyDMask);
268 }
269
270 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
271 bool isDA() const { return isImmTy(ImmTyDA); }
 272 bool isR128() const { return isImmTy(ImmTyR128); }
273 bool isLWE() const { return isImmTy(ImmTyLWE); }
274
Tom Stellarda90b9522016-02-11 03:28:15 +0000275 bool isMod() const {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000276 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000277 }
278
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000279 bool isOffen() const { return isImmTy(ImmTyOffen); }
280 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
281 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
282 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
283 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
284 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000285 bool isGDS() const { return isImmTy(ImmTyGDS); }
286 bool isGLC() const { return isImmTy(ImmTyGLC); }
287 bool isSLC() const { return isImmTy(ImmTySLC); }
288 bool isTFE() const { return isImmTy(ImmTyTFE); }
289
Sam Koltondfa29f72016-03-09 12:29:31 +0000290 bool isBankMask() const {
291 return isImmTy(ImmTyDppBankMask);
292 }
293
294 bool isRowMask() const {
295 return isImmTy(ImmTyDppRowMask);
296 }
297
298 bool isBoundCtrl() const {
299 return isImmTy(ImmTyDppBoundCtrl);
300 }
Sam Koltona74cd522016-03-18 15:35:51 +0000301
Sam Kolton3025e7f2016-04-26 13:33:56 +0000302 bool isSDWASel() const {
303 return isImmTy(ImmTySdwaSel);
304 }
305
306 bool isSDWADstUnused() const {
307 return isImmTy(ImmTySdwaDstUnused);
308 }
309
Tom Stellard45bb48e2015-06-13 03:28:10 +0000310 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000311 assert(isReg() || (isImm() && Imm.Modifiers == 0));
312 if (isReg())
313 Reg.Modifiers = Mods;
314 else
315 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000316 }
317
318 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000319 assert(isRegKind() || isImm());
320 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000321 }
322
323 unsigned getReg() const override {
324 return Reg.RegNo;
325 }
326
327 bool isRegOrImm() const {
328 return isReg() || isImm();
329 }
330
331 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000332 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000333 }
334
335 bool isSCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000336 return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000337 }
338
Matt Arsenault86d336e2015-09-08 21:15:00 +0000339 bool isSCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000340 return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000341 }
342
343 bool isSSrc32() const {
344 return isImm() || isSCSrc32();
345 }
346
347 bool isSSrc64() const {
348 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
349 // See isVSrc64().
350 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000351 }
352
Tom Stellard45bb48e2015-06-13 03:28:10 +0000353 bool isVCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000354 return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000355 }
356
357 bool isVCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000358 return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000359 }
360
361 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000362 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000363 }
364
365 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000366 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000367 // narrowed to 32 bits (in the instruction stream). That requires knowledge
368 // of instruction type (unsigned/signed, floating or "untyped"/B64),
369 // see [AMD GCN3 ISA 6.3.1].
 370 // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
371 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000372 }
373
374 bool isMem() const override {
375 return false;
376 }
377
378 bool isExpr() const {
379 return Kind == Expression;
380 }
381
382 bool isSoppBrTarget() const {
383 return isExpr() || isImm();
384 }
385
386 SMLoc getStartLoc() const override {
387 return StartLoc;
388 }
389
390 SMLoc getEndLoc() const override {
391 return EndLoc;
392 }
393
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000394 void printImmTy(raw_ostream& OS, ImmTy Type) const {
395 switch (Type) {
396 case ImmTyNone: OS << "None"; break;
397 case ImmTyGDS: OS << "GDS"; break;
398 case ImmTyOffen: OS << "Offen"; break;
399 case ImmTyIdxen: OS << "Idxen"; break;
400 case ImmTyAddr64: OS << "Addr64"; break;
401 case ImmTyOffset: OS << "Offset"; break;
402 case ImmTyOffset0: OS << "Offset0"; break;
403 case ImmTyOffset1: OS << "Offset1"; break;
404 case ImmTyGLC: OS << "GLC"; break;
405 case ImmTySLC: OS << "SLC"; break;
406 case ImmTyTFE: OS << "TFE"; break;
407 case ImmTyClampSI: OS << "ClampSI"; break;
408 case ImmTyOModSI: OS << "OModSI"; break;
409 case ImmTyDppCtrl: OS << "DppCtrl"; break;
410 case ImmTyDppRowMask: OS << "DppRowMask"; break;
411 case ImmTyDppBankMask: OS << "DppBankMask"; break;
412 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
413 case ImmTySdwaSel: OS << "SdwaSel"; break;
414 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
415 case ImmTyDMask: OS << "DMask"; break;
416 case ImmTyUNorm: OS << "UNorm"; break;
417 case ImmTyDA: OS << "DA"; break;
418 case ImmTyR128: OS << "R128"; break;
419 case ImmTyLWE: OS << "LWE"; break;
420 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000421 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000422 }
423 }
424
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000425 void print(raw_ostream &OS) const override {
426 switch (Kind) {
427 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000428 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000429 break;
430 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000431 OS << '<' << getImm();
432 if (getImmTy() != ImmTyNone) {
433 OS << " type: "; printImmTy(OS, getImmTy());
434 }
435 OS << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000436 break;
437 case Token:
438 OS << '\'' << getToken() << '\'';
439 break;
440 case Expression:
441 OS << "<expr " << *Expr << '>';
442 break;
443 }
444 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000445
Sam Kolton5f10a132016-05-06 11:31:17 +0000446 static AMDGPUOperand::Ptr CreateImm(int64_t Val, SMLoc Loc,
447 enum ImmTy Type = ImmTyNone,
448 bool IsFPImm = false) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000449 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
450 Op->Imm.Val = Val;
451 Op->Imm.IsFPImm = IsFPImm;
452 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000453 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000454 Op->StartLoc = Loc;
455 Op->EndLoc = Loc;
456 return Op;
457 }
458
Sam Kolton5f10a132016-05-06 11:31:17 +0000459 static AMDGPUOperand::Ptr CreateToken(StringRef Str, SMLoc Loc,
460 bool HasExplicitEncodingSize = true) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000461 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
462 Res->Tok.Data = Str.data();
463 Res->Tok.Length = Str.size();
464 Res->StartLoc = Loc;
465 Res->EndLoc = Loc;
466 return Res;
467 }
468
Sam Kolton5f10a132016-05-06 11:31:17 +0000469 static AMDGPUOperand::Ptr CreateReg(unsigned RegNo, SMLoc S,
470 SMLoc E,
471 const MCRegisterInfo *TRI,
472 const MCSubtargetInfo *STI,
473 bool ForceVOP3) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000474 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
475 Op->Reg.RegNo = RegNo;
476 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000477 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000478 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000479 Op->Reg.IsForcedVOP3 = ForceVOP3;
480 Op->StartLoc = S;
481 Op->EndLoc = E;
482 return Op;
483 }
484
Sam Kolton5f10a132016-05-06 11:31:17 +0000485 static AMDGPUOperand::Ptr CreateExpr(const class MCExpr *Expr, SMLoc S) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000486 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
487 Op->Expr = Expr;
488 Op->StartLoc = S;
489 Op->EndLoc = S;
490 return Op;
491 }
492
Tom Stellard45bb48e2015-06-13 03:28:10 +0000493 bool isSWaitCnt() const;
Artem Tamazovd6468662016-04-25 14:13:51 +0000494 bool isHwreg() const;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000495 bool isSendMsg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000496 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000497 bool isSMRDOffset() const;
498 bool isSMRDLiteralOffset() const;
Sam Koltondfa29f72016-03-09 12:29:31 +0000499 bool isDPPCtrl() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000500};
501
502class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000503 const MCInstrInfo &MII;
504 MCAsmParser &Parser;
505
506 unsigned ForcedEncodingSize;
Matt Arsenault68802d32015-11-05 03:11:27 +0000507
Matt Arsenault3b159672015-12-01 20:31:08 +0000508 bool isSI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000509 return AMDGPU::isSI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000510 }
511
512 bool isCI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000513 return AMDGPU::isCI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000514 }
515
Matt Arsenault68802d32015-11-05 03:11:27 +0000516 bool isVI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000517 return AMDGPU::isVI(getSTI());
Matt Arsenault68802d32015-11-05 03:11:27 +0000518 }
519
520 bool hasSGPR102_SGPR103() const {
521 return !isVI();
522 }
523
Tom Stellard45bb48e2015-06-13 03:28:10 +0000524 /// @name Auto-generated Match Functions
525 /// {
526
527#define GET_ASSEMBLER_HEADER
528#include "AMDGPUGenAsmMatcher.inc"
529
530 /// }
531
Tom Stellard347ac792015-06-26 21:15:07 +0000532private:
533 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
534 bool ParseDirectiveHSACodeObjectVersion();
535 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000536 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
537 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000538 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000539 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000540 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000541 bool ParseDirectiveAMDGPUHsaModuleGlobal();
542 bool ParseDirectiveAMDGPUHsaProgramGlobal();
543 bool ParseSectionDirectiveHSADataGlobalAgent();
544 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000545 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000546 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
547 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Tom Stellard347ac792015-06-26 21:15:07 +0000548
Tom Stellard45bb48e2015-06-13 03:28:10 +0000549public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000550 enum AMDGPUMatchResultTy {
551 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
552 };
553
Akira Hatanakab11ef082015-11-14 06:35:56 +0000554 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000555 const MCInstrInfo &MII,
556 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000557 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Matt Arsenault68802d32015-11-05 03:11:27 +0000558 ForcedEncodingSize(0) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000559 MCAsmParserExtension::Initialize(Parser);
560
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000561 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000562 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000563 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000564 }
565
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000566 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000567 }
568
Tom Stellard347ac792015-06-26 21:15:07 +0000569 AMDGPUTargetStreamer &getTargetStreamer() {
570 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
571 return static_cast<AMDGPUTargetStreamer &>(TS);
572 }
573
Tom Stellard45bb48e2015-06-13 03:28:10 +0000574 unsigned getForcedEncodingSize() const {
575 return ForcedEncodingSize;
576 }
577
578 void setForcedEncodingSize(unsigned Size) {
579 ForcedEncodingSize = Size;
580 }
581
582 bool isForcedVOP3() const {
583 return ForcedEncodingSize == 64;
584 }
585
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000586 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000587 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
588 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
589 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
590 OperandVector &Operands, MCStreamer &Out,
591 uint64_t &ErrorInfo,
592 bool MatchingInlineAsm) override;
593 bool ParseDirective(AsmToken DirectiveID) override;
594 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
595 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
596 SMLoc NameLoc, OperandVector &Operands) override;
597
598 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000599 int64_t Default = 0, bool AddDefault = false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000600 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
601 OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000602 enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
603 int64_t Default = 0, bool AddDefault = false,
 604 bool (*ConvertResult)(int64_t&) = nullptr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000605 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
606 enum AMDGPUOperand::ImmTy ImmTy =
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000607 AMDGPUOperand::ImmTyNone,
608 bool AddDefault = false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000609 OperandMatchResultTy parseOptionalOps(
610 const ArrayRef<OptionalOperand> &OptionalOps,
611 OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000612 OperandMatchResultTy parseStringWithPrefix(const char *Prefix, StringRef &Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000613
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000614 OperandMatchResultTy parseOptionalOperand(OperandVector &Operands, const OptionalOperand& Op, bool AddDefault);
615 OperandMatchResultTy parseAMDGPUOperand(OperandVector &Operands, StringRef Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000616
617 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
618 void cvtDS(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000619
620 bool parseCnt(int64_t &IntVal);
621 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000622 bool parseHwregOperand(int64_t &HwRegCode, int64_t &Offset, int64_t &Width, bool &IsIdentifier);
623 OperandMatchResultTy parseHwreg(OperandVector &Operands);
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000624private:
625 struct OperandInfoTy {
626 int64_t Id;
627 bool IsSymbolic;
628 OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
629 };
630 bool parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
631public:
632 OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000633 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000634 AMDGPUOperand::Ptr defaultHwreg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000635
Tom Stellard45bb48e2015-06-13 03:28:10 +0000636
637 void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000638 AMDGPUOperand::Ptr defaultMubufOffset() const;
639 AMDGPUOperand::Ptr defaultGLC() const;
640 AMDGPUOperand::Ptr defaultSLC() const;
641 AMDGPUOperand::Ptr defaultTFE() const;
642
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000643 OperandMatchResultTy parseOModSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "omod"); }
644 OperandMatchResultTy parseClampSI(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "clamp"); }
645 OperandMatchResultTy parseSMRDOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_offset"); }
646 OperandMatchResultTy parseSMRDLiteralOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "smrd_literal_offset"); }
647 OperandMatchResultTy parseDPPCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "dpp_ctrl"); }
648 OperandMatchResultTy parseRowMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "row_mask"); }
649 OperandMatchResultTy parseBankMask(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bank_mask"); }
650 OperandMatchResultTy parseBoundCtrl(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "bound_ctrl"); }
651 OperandMatchResultTy parseOffen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offen"); }
652 OperandMatchResultTy parseIdxen(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "idxen"); }
653 OperandMatchResultTy parseAddr64(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "addr64"); }
654 OperandMatchResultTy parseOffset(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset"); }
655 OperandMatchResultTy parseOffset0(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset0"); }
656 OperandMatchResultTy parseOffset1(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "offset1"); }
657 OperandMatchResultTy parseGLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "glc"); }
658 OperandMatchResultTy parseSLC(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "slc"); }
659 OperandMatchResultTy parseTFE(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "tfe"); }
660 OperandMatchResultTy parseGDS(OperandVector &Operands) { return parseAMDGPUOperand(Operands, "gds"); }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000661
662 OperandMatchResultTy parseDMask(OperandVector &Operands);
663 OperandMatchResultTy parseUNorm(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000664 OperandMatchResultTy parseDA(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000665 OperandMatchResultTy parseR128(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000666 OperandMatchResultTy parseLWE(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000667 AMDGPUOperand::Ptr defaultDMask() const;
668 AMDGPUOperand::Ptr defaultUNorm() const;
669 AMDGPUOperand::Ptr defaultDA() const;
670 AMDGPUOperand::Ptr defaultR128() const;
671 AMDGPUOperand::Ptr defaultLWE() const;
672 AMDGPUOperand::Ptr defaultSMRDOffset() const;
673 AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
674
675 AMDGPUOperand::Ptr defaultClampSI() const;
676 AMDGPUOperand::Ptr defaultOModSI() const;
677
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000678 OperandMatchResultTy parseOModOperand(OperandVector &Operands);
679
Tom Stellarda90b9522016-02-11 03:28:15 +0000680 void cvtId(MCInst &Inst, const OperandVector &Operands);
681 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000682 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000683
684 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000685 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000686
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000687 OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands, bool AddDefault);
Sam Kolton5f10a132016-05-06 11:31:17 +0000688 AMDGPUOperand::Ptr defaultRowMask() const;
689 AMDGPUOperand::Ptr defaultBankMask() const;
690 AMDGPUOperand::Ptr defaultBoundCtrl() const;
691 void cvtDPP(MCInst &Inst, const OperandVector &Operands);
Sam Kolton3025e7f2016-04-26 13:33:56 +0000692
693 OperandMatchResultTy parseSDWASel(OperandVector &Operands);
694 OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
Sam Kolton5f10a132016-05-06 11:31:17 +0000695 AMDGPUOperand::Ptr defaultSDWASel() const;
696 AMDGPUOperand::Ptr defaultSDWADstUnused() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000697};
698
699struct OptionalOperand {
700 const char *Name;
701 AMDGPUOperand::ImmTy Type;
702 bool IsBit;
703 int64_t Default;
704 bool (*ConvertResult)(int64_t&);
705};
706
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000707} // end anonymous namespace
Tom Stellard45bb48e2015-06-13 03:28:10 +0000708
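// RegWidth is counted in 32-bit registers, so e.g. a width of 2 selects the
// 64-bit class and a width of 16 the 512-bit class of the given register kind.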
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000709static int getRegClass(RegisterKind Is, unsigned RegWidth) {
710 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000711 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000712 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000713 case 1: return AMDGPU::VGPR_32RegClassID;
714 case 2: return AMDGPU::VReg_64RegClassID;
715 case 3: return AMDGPU::VReg_96RegClassID;
716 case 4: return AMDGPU::VReg_128RegClassID;
717 case 8: return AMDGPU::VReg_256RegClassID;
718 case 16: return AMDGPU::VReg_512RegClassID;
719 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000720 } else if (Is == IS_TTMP) {
721 switch (RegWidth) {
722 default: return -1;
723 case 1: return AMDGPU::TTMP_32RegClassID;
724 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000725 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000726 }
727 } else if (Is == IS_SGPR) {
728 switch (RegWidth) {
729 default: return -1;
730 case 1: return AMDGPU::SGPR_32RegClassID;
731 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +0000732 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000733 case 8: return AMDGPU::SReg_256RegClassID;
734 case 16: return AMDGPU::SReg_512RegClassID;
735 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000736 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000737 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000738}
739
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000740static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000741 return StringSwitch<unsigned>(RegName)
742 .Case("exec", AMDGPU::EXEC)
743 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000744 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000745 .Case("m0", AMDGPU::M0)
746 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000747 .Case("tba", AMDGPU::TBA)
748 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000749 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
750 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000751 .Case("vcc_lo", AMDGPU::VCC_LO)
752 .Case("vcc_hi", AMDGPU::VCC_HI)
753 .Case("exec_lo", AMDGPU::EXEC_LO)
754 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000755 .Case("tma_lo", AMDGPU::TMA_LO)
756 .Case("tma_hi", AMDGPU::TMA_HI)
757 .Case("tba_lo", AMDGPU::TBA_LO)
758 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000759 .Default(0);
760}
761
762bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000763 auto R = parseRegister();
764 if (!R) return true;
765 assert(R->isReg());
766 RegNo = R->getReg();
767 StartLoc = R->getStartLoc();
768 EndLoc = R->getEndLoc();
769 return false;
770}
771
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000772bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
773{
774 switch (RegKind) {
775 case IS_SPECIAL:
776 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
777 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
778 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
779 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
780 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
781 return false;
782 case IS_VGPR:
783 case IS_SGPR:
784 case IS_TTMP:
785 if (Reg1 != Reg + RegWidth) { return false; }
786 RegWidth++;
787 return true;
788 default:
789 assert(false); return false;
790 }
791}
792
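// Accepts, per the cases below: special names ("vcc", "exec", ...), a single
// register such as "v0" or "s7", a range such as "v[4:7]" or "s[2:3]", and a
// bracketed list of consecutive registers such as "[s0,s1,s2,s3]".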
793bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
794{
795 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
796 if (getLexer().is(AsmToken::Identifier)) {
797 StringRef RegName = Parser.getTok().getString();
798 if ((Reg = getSpecialRegForName(RegName))) {
799 Parser.Lex();
800 RegKind = IS_SPECIAL;
801 } else {
802 unsigned RegNumIndex = 0;
803 if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
804 else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
805 else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
806 else { return false; }
807 if (RegName.size() > RegNumIndex) {
808 // Single 32-bit register: vXX.
809 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
810 Parser.Lex();
811 RegWidth = 1;
812 } else {
813 // Range of registers: v[XX:YY].
814 Parser.Lex();
815 int64_t RegLo, RegHi;
816 if (getLexer().isNot(AsmToken::LBrac)) { return false; }
817 Parser.Lex();
818
819 if (getParser().parseAbsoluteExpression(RegLo)) { return false; }
820
821 if (getLexer().isNot(AsmToken::Colon)) { return false; }
822 Parser.Lex();
823
824 if (getParser().parseAbsoluteExpression(RegHi)) { return false; }
825
826 if (getLexer().isNot(AsmToken::RBrac)) { return false; }
827 Parser.Lex();
828
829 RegNum = (unsigned) RegLo;
830 RegWidth = (RegHi - RegLo) + 1;
831 }
832 }
833 } else if (getLexer().is(AsmToken::LBrac)) {
834 // List of consecutive registers: [s0,s1,s2,s3]
835 Parser.Lex();
836 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
837 if (RegWidth != 1) { return false; }
838 RegisterKind RegKind1;
839 unsigned Reg1, RegNum1, RegWidth1;
840 do {
841 if (getLexer().is(AsmToken::Comma)) {
842 Parser.Lex();
843 } else if (getLexer().is(AsmToken::RBrac)) {
844 Parser.Lex();
845 break;
846 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
847 if (RegWidth1 != 1) { return false; }
848 if (RegKind1 != RegKind) { return false; }
849 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
850 } else {
851 return false;
852 }
853 } while (true);
854 } else {
855 return false;
856 }
857 switch (RegKind) {
858 case IS_SPECIAL:
859 RegNum = 0;
860 RegWidth = 1;
861 break;
862 case IS_VGPR:
863 case IS_SGPR:
864 case IS_TTMP:
865 {
866 unsigned Size = 1;
867 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
 868 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
869 Size = std::min(RegWidth, 4u);
870 }
871 if (RegNum % Size != 0) { return false; }
872 RegNum = RegNum / Size;
873 int RCID = getRegClass(RegKind, RegWidth);
874 if (RCID == -1) { return false; }
875 const MCRegisterClass RC = TRI->getRegClass(RCID);
876 if (RegNum >= RC.getNumRegs()) { return false; }
877 Reg = RC.getRegister(RegNum);
878 break;
879 }
880
881 default:
882 assert(false); return false;
883 }
884
885 if (!subtargetHasRegister(*TRI, Reg)) { return false; }
886 return true;
887}
888
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000889std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000890 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000891 SMLoc StartLoc = Tok.getLoc();
892 SMLoc EndLoc = Tok.getEndLoc();
Matt Arsenault3b159672015-12-01 20:31:08 +0000893 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
894
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000895 RegisterKind RegKind;
896 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000897
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000898 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
899 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000900 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000901 return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000902 TRI, &getSTI(), false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000903}
904
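// The "_e32"/"_e64" mnemonic suffixes force a 32-bit or 64-bit (VOP3)
// encoding (see ParseInstruction); reject the match here if the selected
// instruction's VOP3 flag disagrees with the forced encoding size.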
905unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
906
907 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
908
909 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
910 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
911 return Match_InvalidOperand;
912
Tom Stellard88e0b252015-10-06 15:57:53 +0000913 if ((TSFlags & SIInstrFlags::VOP3) &&
914 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
915 getForcedEncodingSize() != 64)
916 return Match_PreferE32;
917
Tom Stellard45bb48e2015-06-13 03:28:10 +0000918 return Match_Success;
919}
920
921
922bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
923 OperandVector &Operands,
924 MCStreamer &Out,
925 uint64_t &ErrorInfo,
926 bool MatchingInlineAsm) {
927 MCInst Inst;
928
Ranjeet Singh86ecbb72015-06-30 12:32:53 +0000929 switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000930 default: break;
931 case Match_Success:
932 Inst.setLoc(IDLoc);
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000933 Out.EmitInstruction(Inst, getSTI());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000934 return false;
935 case Match_MissingFeature:
936 return Error(IDLoc, "instruction not supported on this GPU");
937
938 case Match_MnemonicFail:
939 return Error(IDLoc, "unrecognized instruction mnemonic");
940
941 case Match_InvalidOperand: {
942 SMLoc ErrorLoc = IDLoc;
943 if (ErrorInfo != ~0ULL) {
944 if (ErrorInfo >= Operands.size()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000945 return Error(IDLoc, "too few operands for instruction");
946 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000947 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
948 if (ErrorLoc == SMLoc())
949 ErrorLoc = IDLoc;
950 }
951 return Error(ErrorLoc, "invalid operand for instruction");
952 }
Tom Stellard88e0b252015-10-06 15:57:53 +0000953 case Match_PreferE32:
954 return Error(IDLoc, "internal error: instruction without _e64 suffix "
955 "should be encoded as e32");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000956 }
957 llvm_unreachable("Implement any new match types added!");
958}
959
Tom Stellard347ac792015-06-26 21:15:07 +0000960bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
961 uint32_t &Minor) {
962 if (getLexer().isNot(AsmToken::Integer))
963 return TokError("invalid major version");
964
965 Major = getLexer().getTok().getIntVal();
966 Lex();
967
968 if (getLexer().isNot(AsmToken::Comma))
969 return TokError("minor version number required, comma expected");
970 Lex();
971
972 if (getLexer().isNot(AsmToken::Integer))
973 return TokError("invalid minor version");
974
975 Minor = getLexer().getTok().getIntVal();
976 Lex();
977
978 return false;
979}
980
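// Illustrative directive forms handled here and in the ISA variant below:
//   .hsa_code_object_version 1,0
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// (the ISA directive may also be given with no arguments, in which case the
// targeted GPU's own ISA version is emitted).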
981bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
982
983 uint32_t Major;
984 uint32_t Minor;
985
986 if (ParseDirectiveMajorMinor(Major, Minor))
987 return true;
988
989 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
990 return false;
991}
992
993bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
994
995 uint32_t Major;
996 uint32_t Minor;
997 uint32_t Stepping;
998 StringRef VendorName;
999 StringRef ArchName;
1000
1001 // If this directive has no arguments, then use the ISA version for the
1002 // targeted GPU.
1003 if (getLexer().is(AsmToken::EndOfStatement)) {
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001004 AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
Tom Stellard347ac792015-06-26 21:15:07 +00001005 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
1006 Isa.Stepping,
1007 "AMD", "AMDGPU");
1008 return false;
1009 }
1010
1011
1012 if (ParseDirectiveMajorMinor(Major, Minor))
1013 return true;
1014
1015 if (getLexer().isNot(AsmToken::Comma))
1016 return TokError("stepping version number required, comma expected");
1017 Lex();
1018
1019 if (getLexer().isNot(AsmToken::Integer))
1020 return TokError("invalid stepping version");
1021
1022 Stepping = getLexer().getTok().getIntVal();
1023 Lex();
1024
1025 if (getLexer().isNot(AsmToken::Comma))
1026 return TokError("vendor name required, comma expected");
1027 Lex();
1028
1029 if (getLexer().isNot(AsmToken::String))
1030 return TokError("invalid vendor name");
1031
1032 VendorName = getLexer().getTok().getStringContents();
1033 Lex();
1034
1035 if (getLexer().isNot(AsmToken::Comma))
1036 return TokError("arch name required, comma expected");
1037 Lex();
1038
1039 if (getLexer().isNot(AsmToken::String))
1040 return TokError("invalid arch name");
1041
1042 ArchName = getLexer().getTok().getStringContents();
1043 Lex();
1044
1045 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
1046 VendorName, ArchName);
1047 return false;
1048}
1049
Tom Stellardff7416b2015-06-26 21:58:31 +00001050bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1051 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001052 SmallString<40> ErrStr;
1053 raw_svector_ostream Err(ErrStr);
1054 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
1055 return TokError(Err.str());
1056 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001057 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001058 return false;
1059}
1060
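// Sketch of the block this directive parses (field assignments are handled
// by parseAmdKernelCodeField via ParseAMDKernelCodeTValue above):
//   .amd_kernel_code_t
//     <field> = <value>      ; one value per line
//   .end_amd_kernel_code_t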
1061bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
1062
1063 amd_kernel_code_t Header;
Akira Hatanakabd9fc282015-11-14 05:20:05 +00001064 AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +00001065
1066 while (true) {
1067
1068 if (getLexer().isNot(AsmToken::EndOfStatement))
1069 return TokError("amd_kernel_code_t values must begin on a new line");
1070
1071 // Lex EndOfStatement. This is in a while loop, because lexing a comment
1072 // will set the current token to EndOfStatement.
1073 while(getLexer().is(AsmToken::EndOfStatement))
1074 Lex();
1075
1076 if (getLexer().isNot(AsmToken::Identifier))
1077 return TokError("expected value identifier or .end_amd_kernel_code_t");
1078
1079 StringRef ID = getLexer().getTok().getIdentifier();
1080 Lex();
1081
1082 if (ID == ".end_amd_kernel_code_t")
1083 break;
1084
1085 if (ParseAMDKernelCodeTValue(ID, Header))
1086 return true;
1087 }
1088
1089 getTargetStreamer().EmitAMDKernelCodeT(Header);
1090
1091 return false;
1092}
1093
Tom Stellarde135ffd2015-09-25 21:41:28 +00001094bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1095 getParser().getStreamer().SwitchSection(
1096 AMDGPU::getHSATextSection(getContext()));
1097 return false;
1098}
1099
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001100bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1101 if (getLexer().isNot(AsmToken::Identifier))
1102 return TokError("expected symbol name");
1103
1104 StringRef KernelName = Parser.getTok().getString();
1105
1106 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1107 ELF::STT_AMDGPU_HSA_KERNEL);
1108 Lex();
1109 return false;
1110}
1111
Tom Stellard00f2f912015-12-02 19:47:57 +00001112bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1113 if (getLexer().isNot(AsmToken::Identifier))
1114 return TokError("expected symbol name");
1115
1116 StringRef GlobalName = Parser.getTok().getIdentifier();
1117
1118 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1119 Lex();
1120 return false;
1121}
1122
1123bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1124 if (getLexer().isNot(AsmToken::Identifier))
1125 return TokError("expected symbol name");
1126
1127 StringRef GlobalName = Parser.getTok().getIdentifier();
1128
1129 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1130 Lex();
1131 return false;
1132}
1133
1134bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1135 getParser().getStreamer().SwitchSection(
1136 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1137 return false;
1138}
1139
1140bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1141 getParser().getStreamer().SwitchSection(
1142 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1143 return false;
1144}
1145
Tom Stellard9760f032015-12-03 03:34:32 +00001146bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1147 getParser().getStreamer().SwitchSection(
1148 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1149 return false;
1150}
1151
Tom Stellard45bb48e2015-06-13 03:28:10 +00001152bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001153 StringRef IDVal = DirectiveID.getString();
1154
1155 if (IDVal == ".hsa_code_object_version")
1156 return ParseDirectiveHSACodeObjectVersion();
1157
1158 if (IDVal == ".hsa_code_object_isa")
1159 return ParseDirectiveHSACodeObjectISA();
1160
Tom Stellardff7416b2015-06-26 21:58:31 +00001161 if (IDVal == ".amd_kernel_code_t")
1162 return ParseDirectiveAMDKernelCodeT();
1163
Tom Stellardfcfaea42016-05-05 17:03:33 +00001164 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001165 return ParseSectionDirectiveHSAText();
1166
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001167 if (IDVal == ".amdgpu_hsa_kernel")
1168 return ParseDirectiveAMDGPUHsaKernel();
1169
Tom Stellard00f2f912015-12-02 19:47:57 +00001170 if (IDVal == ".amdgpu_hsa_module_global")
1171 return ParseDirectiveAMDGPUHsaModuleGlobal();
1172
1173 if (IDVal == ".amdgpu_hsa_program_global")
1174 return ParseDirectiveAMDGPUHsaProgramGlobal();
1175
1176 if (IDVal == ".hsadata_global_agent")
1177 return ParseSectionDirectiveHSADataGlobalAgent();
1178
1179 if (IDVal == ".hsadata_global_program")
1180 return ParseSectionDirectiveHSADataGlobalProgram();
1181
Tom Stellard9760f032015-12-03 03:34:32 +00001182 if (IDVal == ".hsarodata_readonly_agent")
1183 return ParseSectionDirectiveHSARodataReadonlyAgent();
1184
Tom Stellard45bb48e2015-06-13 03:28:10 +00001185 return true;
1186}
1187
Matt Arsenault68802d32015-11-05 03:11:27 +00001188bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1189 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001190 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001191 return true;
1192
Matt Arsenault3b159672015-12-01 20:31:08 +00001193 if (isSI()) {
1194 // No flat_scr
1195 switch (RegNo) {
1196 case AMDGPU::FLAT_SCR:
1197 case AMDGPU::FLAT_SCR_LO:
1198 case AMDGPU::FLAT_SCR_HI:
1199 return false;
1200 default:
1201 return true;
1202 }
1203 }
1204
Matt Arsenault68802d32015-11-05 03:11:27 +00001205 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1206 // SI/CI have.
1207 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1208 R.isValid(); ++R) {
1209 if (*R == RegNo)
1210 return false;
1211 }
1212
1213 return true;
1214}
1215
Tom Stellard45bb48e2015-06-13 03:28:10 +00001216AMDGPUAsmParser::OperandMatchResultTy
1217AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
1218
1219 // Try to parse with a custom parser
1220 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1221
 1222 // If we successfully parsed the operand or if there was an error parsing,
1223 // we are done.
1224 //
1225 // If we are parsing after we reach EndOfStatement then this means we
1226 // are appending default values to the Operands list. This is only done
1227 // by custom parser, so we shouldn't continue on to the generic parsing.
Tom Stellarda90b9522016-02-11 03:28:15 +00001228 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
Tom Stellard45bb48e2015-06-13 03:28:10 +00001229 getLexer().is(AsmToken::EndOfStatement))
1230 return ResTy;
1231
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001232 bool Negate = false, Abs = false, Abs2 = false;
1233
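  // Source modifiers: a leading '-' sets the neg bit (0x1); either "|reg|" or
  // "abs(reg)" sets the abs bit (0x2) on the register operand parsed below.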
Tom Stellard45bb48e2015-06-13 03:28:10 +00001234 if (getLexer().getKind() == AsmToken::Minus) {
1235 Parser.Lex();
1236 Negate = true;
1237 }
1238
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001239 if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
1240 Parser.Lex();
1241 Abs2 = true;
1242 if (getLexer().isNot(AsmToken::LParen)) {
1243 Error(Parser.getTok().getLoc(), "expected left paren after abs");
1244 return MatchOperand_ParseFail;
1245 }
1246 Parser.Lex();
1247 }
1248
Tom Stellard45bb48e2015-06-13 03:28:10 +00001249 if (getLexer().getKind() == AsmToken::Pipe) {
1250 Parser.Lex();
1251 Abs = true;
1252 }
1253
1254 switch(getLexer().getKind()) {
1255 case AsmToken::Integer: {
1256 SMLoc S = Parser.getTok().getLoc();
1257 int64_t IntVal;
1258 if (getParser().parseAbsoluteExpression(IntVal))
1259 return MatchOperand_ParseFail;
Matt Arsenault382557e2015-10-23 18:07:58 +00001260 if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001261 Error(S, "invalid immediate: only 32-bit values are legal");
1262 return MatchOperand_ParseFail;
1263 }
1264
Tom Stellard45bb48e2015-06-13 03:28:10 +00001265 if (Negate)
1266 IntVal *= -1;
1267 Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
1268 return MatchOperand_Success;
1269 }
1270 case AsmToken::Real: {
 1271 // FIXME: We should emit an error if a double precision floating-point
 1272 // value is used. I'm not sure of the best way to detect this.
1273 SMLoc S = Parser.getTok().getLoc();
1274 int64_t IntVal;
1275 if (getParser().parseAbsoluteExpression(IntVal))
1276 return MatchOperand_ParseFail;
1277
1278 APFloat F((float)BitsToDouble(IntVal));
1279 if (Negate)
1280 F.changeSign();
1281 Operands.push_back(
1282 AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
1283 return MatchOperand_Success;
1284 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001285 case AsmToken::LBrac:
Tom Stellard45bb48e2015-06-13 03:28:10 +00001286 case AsmToken::Identifier: {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001287 if (auto R = parseRegister()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001288 unsigned Modifiers = 0;
1289
1290 if (Negate)
1291 Modifiers |= 0x1;
1292
1293 if (Abs) {
1294 if (getLexer().getKind() != AsmToken::Pipe)
1295 return MatchOperand_ParseFail;
1296 Parser.Lex();
1297 Modifiers |= 0x2;
1298 }
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001299 if (Abs2) {
1300 if (getLexer().isNot(AsmToken::RParen)) {
1301 return MatchOperand_ParseFail;
1302 }
1303 Parser.Lex();
1304 Modifiers |= 0x2;
1305 }
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001306 assert(R->isReg());
1307 R->Reg.IsForcedVOP3 = isForcedVOP3();
Tom Stellarda90b9522016-02-11 03:28:15 +00001308 if (Modifiers) {
Valery Pykhtin9e33c7f2016-03-14 05:25:44 +00001309 R->setModifiers(Modifiers);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001310 }
Valery Pykhtin9e33c7f2016-03-14 05:25:44 +00001311 Operands.push_back(std::move(R));
Tom Stellarda90b9522016-02-11 03:28:15 +00001312 } else {
Tom Stellarda90b9522016-02-11 03:28:15 +00001313 if (ResTy == MatchOperand_NoMatch) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001314 const auto &Tok = Parser.getTok();
1315 Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(),
1316 Tok.getLoc()));
Tom Stellarda90b9522016-02-11 03:28:15 +00001317 Parser.Lex();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001318 if (getLexer().is(AsmToken::Colon)) {
1319 Parser.Lex();
1320 if (getLexer().is(AsmToken::Identifier)) {
1321 Parser.Lex();
1322 }
1323 }
1324 } else {
1325 return ResTy;
Tom Stellarda90b9522016-02-11 03:28:15 +00001326 }
1327 }
1328 return MatchOperand_Success;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001329 }
1330 default:
1331 return MatchOperand_NoMatch;
1332 }
1333}
1334
1335bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1336 StringRef Name,
1337 SMLoc NameLoc, OperandVector &Operands) {
1338
1339 // Clear any forced encodings from the previous instruction.
1340 setForcedEncodingSize(0);
1341
1342 if (Name.endswith("_e64"))
1343 setForcedEncodingSize(64);
1344 else if (Name.endswith("_e32"))
1345 setForcedEncodingSize(32);
1346
1347 // Add the instruction mnemonic
1348 Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
1349
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001350
1351 if (Name.endswith("_e64")) { Name = Name.substr(0, Name.size() - 4); }
1352 if (Name.endswith("_e32")) { Name = Name.substr(0, Name.size() - 4); }
1353
Tom Stellard45bb48e2015-06-13 03:28:10 +00001354 while (!getLexer().is(AsmToken::EndOfStatement)) {
1355 AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1356
1357 // Eat the comma or space if there is one.
1358 if (getLexer().is(AsmToken::Comma))
1359 Parser.Lex();
1360
1361 switch (Res) {
1362 case MatchOperand_Success: break;
1363 case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
1364 "failed parsing operand.");
1365 case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
1366 "not a valid operand.");
1367 }
1368 }
1369
Tom Stellard45bb48e2015-06-13 03:28:10 +00001370 return false;
1371}
1372
1373//===----------------------------------------------------------------------===//
1374// Utility functions
1375//===----------------------------------------------------------------------===//
1376
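// parseIntWithPrefix handles operands of the form "<prefix>:<integer>", e.g.
// (illustrative) "offset:16"; parseNamedBit further below handles bare flags
// such as "glc" and their negated "no<name>" spelling.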
1377AMDGPUAsmParser::OperandMatchResultTy
1378AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001379 int64_t Default, bool AddDefault) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001380 // We are at the end of the statement, and this is a default argument, so
1381 // use a default value.
1382 if (getLexer().is(AsmToken::EndOfStatement)) {
1383 Int = Default;
1384 return MatchOperand_Success;
1385 }
1386
1387 switch(getLexer().getKind()) {
1388 default: return MatchOperand_NoMatch;
1389 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001390 StringRef Name = Parser.getTok().getString();
1391 if (!Name.equals(Prefix)) {
1392 if (AddDefault) {
1393 Int = Default;
1394 return MatchOperand_Success;
1395 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001396 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001397 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001398
1399 Parser.Lex();
1400 if (getLexer().isNot(AsmToken::Colon))
1401 return MatchOperand_ParseFail;
1402
1403 Parser.Lex();
1404 if (getLexer().isNot(AsmToken::Integer))
1405 return MatchOperand_ParseFail;
1406
1407 if (getParser().parseAbsoluteExpression(Int))
1408 return MatchOperand_ParseFail;
1409 break;
1410 }
1411 }
1412 return MatchOperand_Success;
1413}
1414
1415AMDGPUAsmParser::OperandMatchResultTy
1416AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001417 enum AMDGPUOperand::ImmTy ImmTy,
1418 int64_t Default, bool AddDefault,
1419 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001420
1421 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001422 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001423
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001424 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value, Default, AddDefault);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001425 if (Res != MatchOperand_Success)
1426 return Res;
1427
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001428 if (ConvertResult && !ConvertResult(Value)) {
1429 return MatchOperand_ParseFail;
1430 }
1431
1432 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001433 return MatchOperand_Success;
1434}
1435
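// Parses a single-keyword flag such as "gds", "glc" or "tfe". A "no"-prefixed
// spelling of the same name (e.g. "noglc") is also accepted and clears the bit.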
1436AMDGPUAsmParser::OperandMatchResultTy
1437AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001438 enum AMDGPUOperand::ImmTy ImmTy,
1439 bool AddDefault) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001440 int64_t Bit = 0;
1441 SMLoc S = Parser.getTok().getLoc();
1442
  // If we are already at the end of the statement, fall through and add the
  // default value (0) for this bit.
1445 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1446 switch(getLexer().getKind()) {
1447 case AsmToken::Identifier: {
1448 StringRef Tok = Parser.getTok().getString();
1449 if (Tok == Name) {
1450 Bit = 1;
1451 Parser.Lex();
1452 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1453 Bit = 0;
1454 Parser.Lex();
1455 } else {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001456 if (AddDefault) {
1457 Bit = 0;
1458 } else {
1459 return MatchOperand_NoMatch;
1460 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001461 }
1462 break;
1463 }
1464 default:
1465 return MatchOperand_NoMatch;
1466 }
1467 }
1468
1469 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1470 return MatchOperand_Success;
1471}
1472
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001473typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1474
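// If an operand of type ImmT was parsed, append it to Inst; otherwise append
// the given default immediate so the MCInst still gets the expected operand.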
Sam Koltona74cd522016-03-18 15:35:51 +00001475void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1476 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001477 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001478 auto i = OptionalIdx.find(ImmT);
1479 if (i != OptionalIdx.end()) {
1480 unsigned Idx = i->second;
1481 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1482 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001483 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001484 }
1485}
1486
Tom Stellard45bb48e2015-06-13 03:28:10 +00001487static bool operandsHasOptionalOp(const OperandVector &Operands,
1488 const OptionalOperand &OOp) {
1489 for (unsigned i = 0; i < Operands.size(); i++) {
1490 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1491 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1492 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1493 return true;
1494
1495 }
1496 return false;
1497}
1498
1499AMDGPUAsmParser::OperandMatchResultTy
1500AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
1501 OperandVector &Operands) {
1502 SMLoc S = Parser.getTok().getLoc();
1503 for (const OptionalOperand &Op : OptionalOps) {
1504 if (operandsHasOptionalOp(Operands, Op))
1505 continue;
1506 AMDGPUAsmParser::OperandMatchResultTy Res;
1507 int64_t Value;
1508 if (Op.IsBit) {
1509 Res = parseNamedBit(Op.Name, Operands, Op.Type);
1510 if (Res == MatchOperand_NoMatch)
1511 continue;
1512 return Res;
1513 }
1514
1515 Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
1516
1517 if (Res == MatchOperand_NoMatch)
1518 continue;
1519
1520 if (Res != MatchOperand_Success)
1521 return Res;
1522
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001523 bool DefaultValue = (Value == Op.Default);
1524
Tom Stellard45bb48e2015-06-13 03:28:10 +00001525 if (Op.ConvertResult && !Op.ConvertResult(Value)) {
1526 return MatchOperand_ParseFail;
1527 }
1528
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001529 if (!DefaultValue) {
1530 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
1531 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001532 return MatchOperand_Success;
1533 }
1534 return MatchOperand_NoMatch;
1535}
1536
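// Parses "<prefix>:<identifier>" and returns the identifier in Value, e.g.
// "dst_sel:DWORD" as used by the SDWA operand parsers below.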
Sam Kolton3025e7f2016-04-26 13:33:56 +00001537AMDGPUAsmParser::OperandMatchResultTy
1538AMDGPUAsmParser::parseStringWithPrefix(const char *Prefix, StringRef &Value) {
1539 if (getLexer().isNot(AsmToken::Identifier)) {
1540 return MatchOperand_NoMatch;
1541 }
1542 StringRef Tok = Parser.getTok().getString();
1543 if (Tok != Prefix) {
1544 return MatchOperand_NoMatch;
1545 }
1546
1547 Parser.Lex();
1548 if (getLexer().isNot(AsmToken::Colon)) {
1549 return MatchOperand_ParseFail;
1550 }
1551
1552 Parser.Lex();
1553 if (getLexer().isNot(AsmToken::Identifier)) {
1554 return MatchOperand_ParseFail;
1555 }
1556
1557 Value = Parser.getTok().getString();
1558 return MatchOperand_Success;
1559}
1560
Tom Stellard45bb48e2015-06-13 03:28:10 +00001561//===----------------------------------------------------------------------===//
1562// ds
1563//===----------------------------------------------------------------------===//
1564
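// Illustrative DS syntax handled by the converters below:
//   ds_write2_b32 v1, v2, v3 offset0:4 offset1:8
//   ds_add_u32    v1, v2 offset:16 gds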
Tom Stellard45bb48e2015-06-13 03:28:10 +00001565void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1566 const OperandVector &Operands) {
1567
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001568 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001569
1570 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1571 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1572
1573 // Add the register arguments
1574 if (Op.isReg()) {
1575 Op.addRegOperands(Inst, 1);
1576 continue;
1577 }
1578
1579 // Handle optional arguments
1580 OptionalIdx[Op.getImmTy()] = i;
1581 }
1582
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001583 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
1584 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001585 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001586
Tom Stellard45bb48e2015-06-13 03:28:10 +00001587 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1588}
1589
1590void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1591
1592 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1593 bool GDSOnly = false;
1594
1595 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1596 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1597
1598 // Add the register arguments
1599 if (Op.isReg()) {
1600 Op.addRegOperands(Inst, 1);
1601 continue;
1602 }
1603
1604 if (Op.isToken() && Op.getToken() == "gds") {
1605 GDSOnly = true;
1606 continue;
1607 }
1608
1609 // Handle optional arguments
1610 OptionalIdx[Op.getImmTy()] = i;
1611 }
1612
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);

  // Pure-GDS instructions carry "gds" as a token in the asm string, so the
  // gds immediate operand is only added for the remaining DS instructions.
  if (!GDSOnly) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
1619 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1620}
1621
1622
1623//===----------------------------------------------------------------------===//
1624// s_waitcnt
1625//===----------------------------------------------------------------------===//
1626
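// Parses one counter term of an s_waitcnt operand, e.g. the "vmcnt(0)" in
// "s_waitcnt vmcnt(0) & lgkmcnt(0)", and folds it into the packed immediate.
// Terms may be separated by '&' or ','.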
1627bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1628 StringRef CntName = Parser.getTok().getString();
1629 int64_t CntVal;
1630
1631 Parser.Lex();
1632 if (getLexer().isNot(AsmToken::LParen))
1633 return true;
1634
1635 Parser.Lex();
1636 if (getLexer().isNot(AsmToken::Integer))
1637 return true;
1638
1639 if (getParser().parseAbsoluteExpression(CntVal))
1640 return true;
1641
1642 if (getLexer().isNot(AsmToken::RParen))
1643 return true;
1644
1645 Parser.Lex();
1646 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1647 Parser.Lex();
1648
1649 int CntShift;
1650 int CntMask;
1651
1652 if (CntName == "vmcnt") {
1653 CntMask = 0xf;
1654 CntShift = 0;
1655 } else if (CntName == "expcnt") {
1656 CntMask = 0x7;
1657 CntShift = 4;
1658 } else if (CntName == "lgkmcnt") {
Tom Stellard3d2c8522016-01-28 17:13:44 +00001659 CntMask = 0xf;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001660 CntShift = 8;
1661 } else {
1662 return true;
1663 }
1664
1665 IntVal &= ~(CntMask << CntShift);
1666 IntVal |= (CntVal << CntShift);
1667 return false;
1668}
1669
1670AMDGPUAsmParser::OperandMatchResultTy
1671AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1672 // Disable all counters by default.
1673 // vmcnt [3:0]
1674 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00001675 // lgkmcnt [11:8]
1676 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001677 SMLoc S = Parser.getTok().getLoc();
1678
1679 switch(getLexer().getKind()) {
1680 default: return MatchOperand_ParseFail;
1681 case AsmToken::Integer:
1682 // The operand can be an integer value.
1683 if (getParser().parseAbsoluteExpression(CntVal))
1684 return MatchOperand_ParseFail;
1685 break;
1686
1687 case AsmToken::Identifier:
1688 do {
1689 if (parseCnt(CntVal))
1690 return MatchOperand_ParseFail;
1691 } while(getLexer().isNot(AsmToken::EndOfStatement));
1692 break;
1693 }
1694 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1695 return MatchOperand_Success;
1696}
1697
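// Parses the hwreg(...) operand of s_getreg/s_setreg, given either a symbolic
// register name or a numeric code, with an optional bit offset and width.
// Illustrative forms:
//   s_getreg_b32 s0, hwreg(HW_REG_LDS_ALLOC, 8, 5)
//   s_setreg_b32 hwreg(6, 8, 5), s0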
bool AMDGPUAsmParser::parseHwregOperand(int64_t &HwRegCode, int64_t &Offset,
                                        int64_t &Width, bool &IsIdentifier) {
Artem Tamazovd6468662016-04-25 14:13:51 +00001699 if (Parser.getTok().getString() != "hwreg")
1700 return true;
1701 Parser.Lex();
1702
1703 if (getLexer().isNot(AsmToken::LParen))
1704 return true;
1705 Parser.Lex();
1706
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001707 if (getLexer().is(AsmToken::Identifier)) {
1708 IsIdentifier = true;
1709 HwRegCode = StringSwitch<unsigned>(Parser.getTok().getString())
1710 .Case("HW_REG_MODE" , 1)
1711 .Case("HW_REG_STATUS" , 2)
1712 .Case("HW_REG_TRAPSTS" , 3)
1713 .Case("HW_REG_HW_ID" , 4)
1714 .Case("HW_REG_GPR_ALLOC", 5)
1715 .Case("HW_REG_LDS_ALLOC", 6)
1716 .Case("HW_REG_IB_STS" , 7)
1717 .Default(-1);
1718 Parser.Lex();
1719 } else {
1720 IsIdentifier = false;
1721 if (getLexer().isNot(AsmToken::Integer))
1722 return true;
1723 if (getParser().parseAbsoluteExpression(HwRegCode))
1724 return true;
1725 }
Artem Tamazovd6468662016-04-25 14:13:51 +00001726
1727 if (getLexer().is(AsmToken::RParen)) {
1728 Parser.Lex();
1729 return false;
1730 }
1731
1732 // optional params
1733 if (getLexer().isNot(AsmToken::Comma))
1734 return true;
1735 Parser.Lex();
1736
1737 if (getLexer().isNot(AsmToken::Integer))
1738 return true;
1739 if (getParser().parseAbsoluteExpression(Offset))
1740 return true;
1741
1742 if (getLexer().isNot(AsmToken::Comma))
1743 return true;
1744 Parser.Lex();
1745
1746 if (getLexer().isNot(AsmToken::Integer))
1747 return true;
1748 if (getParser().parseAbsoluteExpression(Width))
1749 return true;
1750
1751 if (getLexer().isNot(AsmToken::RParen))
1752 return true;
1753 Parser.Lex();
1754
1755 return false;
1756}
1757
1758AMDGPUAsmParser::OperandMatchResultTy
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001759AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
Artem Tamazovd6468662016-04-25 14:13:51 +00001760 int64_t Imm16Val = 0;
1761 SMLoc S = Parser.getTok().getLoc();
1762
1763 switch(getLexer().getKind()) {
1764 default: return MatchOperand_ParseFail;
1765 case AsmToken::Integer:
1766 // The operand can be an integer value.
1767 if (getParser().parseAbsoluteExpression(Imm16Val))
1768 return MatchOperand_ParseFail;
1769 if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
1770 Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return an error code; create the imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
1773 }
1774 break;
1775
1776 case AsmToken::Identifier: {
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001777 bool IsIdentifier = false;
1778 int64_t HwRegCode = -1;
Artem Tamazovd6468662016-04-25 14:13:51 +00001779 int64_t Offset = 0; // default
1780 int64_t Width = 32; // default
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001781 if (parseHwregOperand(HwRegCode, Offset, Width, IsIdentifier))
Artem Tamazovd6468662016-04-25 14:13:51 +00001782 return MatchOperand_ParseFail;
1783 // HwRegCode (6) [5:0]
1784 // Offset (5) [10:6]
1785 // WidthMinusOne (5) [15:11]
Reid Kleckner7f0ae152016-04-27 16:46:33 +00001786 if (HwRegCode < 0 || HwRegCode > 63) {
Artem Tamazov5cd55b12016-04-27 15:17:03 +00001787 if (IsIdentifier)
1788 Error(S, "invalid symbolic name of hardware register");
1789 else
1790 Error(S, "invalid code of hardware register: only 6-bit values are legal");
Reid Kleckner7f0ae152016-04-27 16:46:33 +00001791 }
Artem Tamazovd6468662016-04-25 14:13:51 +00001792 if (Offset < 0 || Offset > 31)
1793 Error(S, "invalid bit offset: only 5-bit values are legal");
1794 if (Width < 1 || Width > 32)
1795 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
1796 Imm16Val = HwRegCode | (Offset << 6) | ((Width-1) << 11);
1797 }
1798 break;
1799 }
1800 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
1801 return MatchOperand_Success;
1802}
1803
Tom Stellard45bb48e2015-06-13 03:28:10 +00001804bool AMDGPUOperand::isSWaitCnt() const {
1805 return isImm();
1806}
1807
Artem Tamazovd6468662016-04-25 14:13:51 +00001808bool AMDGPUOperand::isHwreg() const {
1809 return isImmTy(ImmTyHwreg);
1810}
1811
Sam Kolton5f10a132016-05-06 11:31:17 +00001812AMDGPUOperand::Ptr AMDGPUAsmParser::defaultHwreg() const {
1813 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyHwreg);
1814}
1815
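// Parses the sendmsg(...) operand of s_sendmsg, e.g. (illustrative):
//   s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)
//   s_sendmsg sendmsg(MSG_INTERRUPT)
// An operation is expected only for GS, GS_DONE and SYSMSG; the stream id is
// optional and applies only to GS/GS_DONE operations other than nop.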
Artem Tamazovebe71ce2016-05-06 17:48:48 +00001816bool AMDGPUAsmParser::parseSendMsg(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
1817 using namespace llvm::AMDGPU::SendMsg;
1818
1819 if (Parser.getTok().getString() != "sendmsg")
1820 return true;
1821 Parser.Lex();
1822
1823 if (getLexer().isNot(AsmToken::LParen))
1824 return true;
1825 Parser.Lex();
1826
1827 if (getLexer().is(AsmToken::Identifier)) {
1828 Msg.IsSymbolic = true;
1829 Msg.Id = ID_UNKNOWN_;
1830 const std::string tok = Parser.getTok().getString();
1831 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
1832 switch(i) {
1833 default: continue; // Omit gaps.
1834 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
1835 }
1836 if (tok == IdSymbolic[i]) {
1837 Msg.Id = i;
1838 break;
1839 }
1840 }
1841 Parser.Lex();
1842 } else {
1843 Msg.IsSymbolic = false;
1844 if (getLexer().isNot(AsmToken::Integer))
1845 return true;
1846 if (getParser().parseAbsoluteExpression(Msg.Id))
1847 return true;
1848 if (getLexer().is(AsmToken::Integer))
1849 if (getParser().parseAbsoluteExpression(Msg.Id))
1850 Msg.Id = ID_UNKNOWN_;
1851 }
1852 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
1853 return false;
1854
1855 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
1856 if (getLexer().isNot(AsmToken::RParen))
1857 return true;
1858 Parser.Lex();
1859 return false;
1860 }
1861
1862 if (getLexer().isNot(AsmToken::Comma))
1863 return true;
1864 Parser.Lex();
1865
1866 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
1867 Operation.Id = ID_UNKNOWN_;
1868 if (getLexer().is(AsmToken::Identifier)) {
1869 Operation.IsSymbolic = true;
1870 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
1871 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
1872 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
1873 const std::string Tok = Parser.getTok().getString();
1874 for (int i = F; i < L; ++i) {
1875 if (Tok == S[i]) {
1876 Operation.Id = i;
1877 break;
1878 }
1879 }
1880 Parser.Lex();
1881 } else {
1882 Operation.IsSymbolic = false;
1883 if (getLexer().isNot(AsmToken::Integer))
1884 return true;
1885 if (getParser().parseAbsoluteExpression(Operation.Id))
1886 return true;
1887 }
1888
1889 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1890 // Stream id is optional.
1891 if (getLexer().is(AsmToken::RParen)) {
1892 Parser.Lex();
1893 return false;
1894 }
1895
1896 if (getLexer().isNot(AsmToken::Comma))
1897 return true;
1898 Parser.Lex();
1899
1900 if (getLexer().isNot(AsmToken::Integer))
1901 return true;
1902 if (getParser().parseAbsoluteExpression(StreamId))
1903 return true;
1904 }
1905
1906 if (getLexer().isNot(AsmToken::RParen))
1907 return true;
1908 Parser.Lex();
1909 return false;
1910}
1911
1912AMDGPUAsmParser::OperandMatchResultTy
1913AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
1914 using namespace llvm::AMDGPU::SendMsg;
1915
1916 int64_t Imm16Val = 0;
1917 SMLoc S = Parser.getTok().getLoc();
1918
1919 switch(getLexer().getKind()) {
1920 default:
1921 return MatchOperand_NoMatch;
1922 case AsmToken::Integer:
1923 // The operand can be an integer value.
1924 if (getParser().parseAbsoluteExpression(Imm16Val))
1925 return MatchOperand_NoMatch;
1926 if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
1927 Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return an error code; create the imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
1930 }
1931 break;
1932 case AsmToken::Identifier: {
1933 OperandInfoTy Msg(ID_UNKNOWN_);
1934 OperandInfoTy Operation(OP_UNKNOWN_);
1935 int64_t StreamId = STREAM_ID_DEFAULT;
1936 if (parseSendMsg(Msg, Operation, StreamId))
1937 return MatchOperand_NoMatch;
1938 do {
1939 // Validate and encode message ID.
1940 if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
1941 || Msg.Id == ID_SYSMSG)) {
1942 if (Msg.IsSymbolic)
1943 Error(S, "invalid/unsupported symbolic name of message");
1944 else
1945 Error(S, "invalid/unsupported code of message");
1946 break;
1947 }
1948 Imm16Val = Msg.Id;
1949 // Validate and encode operation ID.
1950 if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
1951 if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
1952 if (Operation.IsSymbolic)
1953 Error(S, "invalid symbolic name of GS_OP");
1954 else
1955 Error(S, "invalid code of GS_OP: only 2-bit values are legal");
1956 break;
1957 }
1958 if (Operation.Id == OP_GS_NOP
1959 && Msg.Id != ID_GS_DONE) {
1960 Error(S, "invalid GS_OP: NOP is for GS_DONE only");
1961 break;
1962 }
1963 Imm16Val |= (Operation.Id << OP_SHIFT_);
1964 }
1965 if (Msg.Id == ID_SYSMSG) {
1966 if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
1967 if (Operation.IsSymbolic)
1968 Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
1969 else
1970 Error(S, "invalid/unsupported code of SYSMSG_OP");
1971 break;
1972 }
1973 Imm16Val |= (Operation.Id << OP_SHIFT_);
1974 }
1975 // Validate and encode stream ID.
1976 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
1977 if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
1978 Error(S, "invalid stream id: only 2-bit values are legal");
1979 break;
1980 }
1981 Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
1982 }
1983 } while (0);
1984 }
1985 break;
1986 }
1987 Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
1988 return MatchOperand_Success;
1989}
1990
1991bool AMDGPUOperand::isSendMsg() const {
1992 return isImmTy(ImmTySendMsg);
1993}
1994
Tom Stellard45bb48e2015-06-13 03:28:10 +00001995//===----------------------------------------------------------------------===//
1996// sopp branch targets
1997//===----------------------------------------------------------------------===//
1998
1999AMDGPUAsmParser::OperandMatchResultTy
2000AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
2001 SMLoc S = Parser.getTok().getLoc();
2002
2003 switch (getLexer().getKind()) {
2004 default: return MatchOperand_ParseFail;
2005 case AsmToken::Integer: {
2006 int64_t Imm;
2007 if (getParser().parseAbsoluteExpression(Imm))
2008 return MatchOperand_ParseFail;
2009 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
2010 return MatchOperand_Success;
2011 }
2012
2013 case AsmToken::Identifier:
2014 Operands.push_back(AMDGPUOperand::CreateExpr(
2015 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
2016 Parser.getTok().getString()), getContext()), S));
2017 Parser.Lex();
2018 return MatchOperand_Success;
2019 }
2020}
2021
2022//===----------------------------------------------------------------------===//
2023// flat
2024//===----------------------------------------------------------------------===//
2025
Tom Stellard45bb48e2015-06-13 03:28:10 +00002026//===----------------------------------------------------------------------===//
2027// mubuf
2028//===----------------------------------------------------------------------===//
2029
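// The MUBUF offset is an unsigned 12-bit byte offset, e.g. "offset:4095";
// glc, slc and tfe are single-bit flags appended after it by cvtMubuf below.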
Tom Stellard45bb48e2015-06-13 03:28:10 +00002030bool AMDGPUOperand::isMubufOffset() const {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002031 return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
Tom Stellard45bb48e2015-06-13 03:28:10 +00002032}
2033
Sam Kolton5f10a132016-05-06 11:31:17 +00002034AMDGPUOperand::Ptr AMDGPUAsmParser::defaultMubufOffset() const {
2035 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2036}
2037
2038AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
2039 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyGLC);
2040}
2041
2042AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
2043 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySLC);
2044}
2045
2046AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
2047 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyTFE);
2048}
2049
Tom Stellard45bb48e2015-06-13 03:28:10 +00002050void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
2051 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002052 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002053
2054 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2055 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2056
2057 // Add the register arguments
2058 if (Op.isReg()) {
2059 Op.addRegOperands(Inst, 1);
2060 continue;
2061 }
2062
2063 // Handle the case where soffset is an immediate
2064 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
2065 Op.addImmOperands(Inst, 1);
2066 continue;
2067 }
2068
2069 // Handle tokens like 'offen' which are sometimes hard-coded into the
2070 // asm string. There are no MCInst operands for these.
2071 if (Op.isToken()) {
2072 continue;
2073 }
2074 assert(Op.isImm());
2075
2076 // Handle optional arguments
2077 OptionalIdx[Op.getImmTy()] = i;
2078 }
2079
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002080 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
2081 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2082 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2083 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002084}
2085
2086//===----------------------------------------------------------------------===//
2087// mimg
2088//===----------------------------------------------------------------------===//
2089
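// MIMG instructions take a "dmask:0xN" value plus the unorm, glc, slc, r128,
// tfe, lwe and da single-bit flags handled by the helpers below.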
2090AMDGPUAsmParser::OperandMatchResultTy
2091AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002092 return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002093}
2094
2095AMDGPUAsmParser::OperandMatchResultTy
2096AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002097 return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
2098}
2099
2100AMDGPUAsmParser::OperandMatchResultTy
2101AMDGPUAsmParser::parseDA(OperandVector &Operands) {
2102 return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002103}
2104
2105AMDGPUAsmParser::OperandMatchResultTy
2106AMDGPUAsmParser::parseR128(OperandVector &Operands) {
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002107 return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
2108}
2109
2110AMDGPUAsmParser::OperandMatchResultTy
2111AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
2112 return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002113}
2114
Sam Kolton5f10a132016-05-06 11:31:17 +00002115AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
2116 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDMask);
2117}
2118
2119AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
2120 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
2121}
2122
2123AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
2124 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDA);
2125}
2126
2127AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
2128 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyR128);
2129}
2130
2131AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
2132 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyLWE);
2133}
2134
Tom Stellard45bb48e2015-06-13 03:28:10 +00002135//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002136// smrd
2137//===----------------------------------------------------------------------===//
2138
2139bool AMDGPUOperand::isSMRDOffset() const {
2140
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
2143 return isImm() && isUInt<8>(getImm());
2144}
2145
2146bool AMDGPUOperand::isSMRDLiteralOffset() const {
2147 // 32-bit literals are only supported on CI and we only want to use them
2148 // when the offset is > 8-bits.
2149 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2150}
2151
Sam Kolton5f10a132016-05-06 11:31:17 +00002152AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset() const {
2153 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2154}
2155
2156AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
2157 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2158}
2159
Tom Stellard217361c2015-08-06 19:28:38 +00002160//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002161// vop3
2162//===----------------------------------------------------------------------===//
2163
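// The VOP3 output-modifier (omod) field produced by these converters is
// 0 = none, 1 = mul:2, 2 = mul:4, 3 = div:2.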
2164static bool ConvertOmodMul(int64_t &Mul) {
2165 if (Mul != 1 && Mul != 2 && Mul != 4)
2166 return false;
2167
2168 Mul >>= 1;
2169 return true;
2170}
2171
2172static bool ConvertOmodDiv(int64_t &Div) {
2173 if (Div == 1) {
2174 Div = 0;
2175 return true;
2176 }
2177
2178 if (Div == 2) {
2179 Div = 3;
2180 return true;
2181 }
2182
2183 return false;
2184}
2185
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002186static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
2187 if (BoundCtrl == 0) {
2188 BoundCtrl = 1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002189 return true;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002190 } else if (BoundCtrl == -1) {
2191 BoundCtrl = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002192 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002193 }
2194 return false;
2195}
2196
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002197// Note: the order in this table matches the order of operands in AsmString.
2198static const OptionalOperand AMDGPUOperandTable[] = {
2199 {"offen", AMDGPUOperand::ImmTyOffen, true, 0, nullptr},
2200 {"offset0", AMDGPUOperand::ImmTyOffset0, false, 0, nullptr},
2201 {"offset1", AMDGPUOperand::ImmTyOffset1, false, 0, nullptr},
2202 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr},
2203 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
2204 {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
2205 {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
2206 {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr},
2207 {"clamp", AMDGPUOperand::ImmTyClampSI, true, 0, nullptr},
2208 {"omod", AMDGPUOperand::ImmTyOModSI, false, 1, ConvertOmodMul},
2209 {"unorm", AMDGPUOperand::ImmTyUNorm, true, 0, nullptr},
2210 {"da", AMDGPUOperand::ImmTyDA, true, 0, nullptr},
2211 {"r128", AMDGPUOperand::ImmTyR128, true, 0, nullptr},
2212 {"lwe", AMDGPUOperand::ImmTyLWE, true, 0, nullptr},
2213 {"dmask", AMDGPUOperand::ImmTyDMask, false, 0, nullptr},
2214 {"dpp_ctrl", AMDGPUOperand::ImmTyDppCtrl, false, -1, nullptr},
2215 {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
2216 {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
2217 {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, ConvertBoundCtrl},
2218};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002219
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands,
                                      const OptionalOperand &Op,
                                      bool AddDefault) {
2222 if (Op.IsBit) {
2223 return parseNamedBit(Op.Name, Operands, Op.Type, AddDefault);
2224 } else if (Op.Type == AMDGPUOperand::ImmTyDppCtrl) {
2225 return parseDPPCtrlOps(Operands, AddDefault);
2226 } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
2227 return parseOModOperand(Operands);
2228 } else {
2229 return parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.Default, AddDefault, Op.ConvertResult);
2230 }
2231}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002232
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseAMDGPUOperand(OperandVector &Operands, StringRef Name) {
2235 StringRef Tok;
2236 if (getLexer().is(AsmToken::Identifier)) {
2237 Tok = Parser.getTok().getString();
2238 }
2239 bool optional = false;
2240 if (Tok == "mul" || Tok == "div") { optional = true; }
2241 for (const OptionalOperand &Op1 : AMDGPUOperandTable) {
2242 if (Op1.Name == Tok) { optional = true; break; }
2243 }
  // Attempt to parse the current optional operand.
2245 for (const OptionalOperand &Op : AMDGPUOperandTable) {
2246 // TODO: For now, omod is handled separately because
2247 // token name does not match name in table.
2248 bool parseThis =
2249 Name == "" ||
2250 (Op.Name == Name) ||
2251 (Name == "omod" && Op.Type == AMDGPUOperand::ImmTyOModSI);
2252 if (parseThis && Tok == Name) {
2253 // Exactly the expected token for optional operand.
2254 // Parse it and add operand normally.
2255 return parseOptionalOperand(Operands, Op, true);
2256 } else if (parseThis) {
2257 // Token for optional operand which is later in the table
2258 // than the one we expect. If needed, add default value
2259 // for the operand we expect, do not consume anything
2260 // and return MatchOperand_NoMatch. Parsing will continue.
2261 return parseOptionalOperand(Operands, Op, optional);
2262 } else if (Op.Name == Tok) {
2263 // This looks like optional operand, but we do not expect it.
2264 // This is the case when AsmString has token in it.
2265 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002266 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002267 }
2268 return MatchOperand_NoMatch;
2269}
2270
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
2273 StringRef Name = Parser.getTok().getString();
2274 if (Name == "mul") {
2275 return parseIntWithPrefix("mul", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodMul);
2276 } else if (Name == "div") {
2277 return parseIntWithPrefix("div", Operands, AMDGPUOperand::ImmTyOModSI, 0, false, ConvertOmodDiv);
2278 } else {
2279 return MatchOperand_NoMatch;
2280 }
2281}
2282
Sam Kolton5f10a132016-05-06 11:31:17 +00002283AMDGPUOperand::Ptr AMDGPUAsmParser::defaultClampSI() const {
2284 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyClampSI);
2285}
2286
2287AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOModSI() const {
2288 return AMDGPUOperand::CreateImm(1, SMLoc(), AMDGPUOperand::ImmTyOModSI);
2289}
2290
Tom Stellarda90b9522016-02-11 03:28:15 +00002291void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2292 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002293 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002294 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002295 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2296 }
2297 for (unsigned E = Operands.size(); I != E; ++I)
2298 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2299}
2300
2301void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002302 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2303 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002304 cvtVOP3(Inst, Operands);
2305 } else {
2306 cvtId(Inst, Operands);
2307 }
2308}
2309
Tom Stellarda90b9522016-02-11 03:28:15 +00002310void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002311 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00002312 unsigned I = 1;
2313 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002314 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002315 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00002316 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002317
Tom Stellarda90b9522016-02-11 03:28:15 +00002318 for (unsigned E = Operands.size(); I != E; ++I) {
2319 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00002320 if (Op.isRegOrImmWithInputMods()) {
2321 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00002322 } else if (Op.isImm()) {
2323 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00002324 } else {
2325 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002326 }
Tom Stellarda90b9522016-02-11 03:28:15 +00002327 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002328
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002329 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
2330 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002331}
2332
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002333void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002334 unsigned I = 1;
2335 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2336 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2337 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2338 }
2339
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002340 OptionalImmIndexMap OptionalIdx;
2341
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002342 for (unsigned E = Operands.size(); I != E; ++I) {
2343 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002344
2345 // Add the register arguments
2346 if (Op.isRegOrImm()) {
2347 Op.addRegOrImmOperands(Inst, 1);
2348 continue;
2349 } else if (Op.isImmModifier()) {
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002350 OptionalIdx[Op.getImmTy()] = I;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002351 } else {
2352 assert(false);
2353 }
2354 }
2355
2356 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2357 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2358 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002359 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002360 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2361 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2362 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002363 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002364}
2365
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002366void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
2367 unsigned I = 1;
2368 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2369 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2370 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2371 }
2372
2373 // Add src, same as dst
2374 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
2375
2376 OptionalImmIndexMap OptionalIdx;
2377
2378 for (unsigned E = Operands.size(); I != E; ++I) {
2379 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2380
2381 // Add the register arguments
2382 if (Op.isRegOrImm()) {
2383 Op.addRegOrImmOperands(Inst, 1);
2384 continue;
2385 } else if (Op.isImmModifier()) {
2386 OptionalIdx[Op.getImmTy()] = I;
2387 } else {
2388 assert(false);
2389 }
2390 }
2391
2392 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
2393 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
2394 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
2395 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
2396 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
2397 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
2398 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
2399 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
2400}
2401
Sam Koltondfa29f72016-03-09 12:29:31 +00002402//===----------------------------------------------------------------------===//
2403// dpp
2404//===----------------------------------------------------------------------===//
2405
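// dpp_ctrl is a 9-bit control value. Accepted spellings include
// quad_perm:[a,b,c,d], row_shl:n, row_shr:n, row_ror:n, wave_shl:n,
// wave_rol:n, wave_shr:n, wave_ror:n, row_mirror, row_half_mirror and
// row_bcast:15/31, matching the encodings checked in isDPPCtrl() below.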
2406bool AMDGPUOperand::isDPPCtrl() const {
2407 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2408 if (result) {
2409 int64_t Imm = getImm();
2410 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2411 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2412 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2413 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2414 (Imm == 0x130) ||
2415 (Imm == 0x134) ||
2416 (Imm == 0x138) ||
2417 (Imm == 0x13c) ||
2418 (Imm == 0x140) ||
2419 (Imm == 0x141) ||
2420 (Imm == 0x142) ||
2421 (Imm == 0x143);
2422 }
2423 return false;
2424}
2425
Sam Koltona74cd522016-03-18 15:35:51 +00002426AMDGPUAsmParser::OperandMatchResultTy
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002427AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands, bool AddDefault) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002428 SMLoc S = Parser.getTok().getLoc();
2429 StringRef Prefix;
2430 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00002431
Sam Koltona74cd522016-03-18 15:35:51 +00002432 if (getLexer().getKind() == AsmToken::Identifier) {
2433 Prefix = Parser.getTok().getString();
2434 } else {
2435 return MatchOperand_NoMatch;
2436 }
2437
2438 if (Prefix == "row_mirror") {
2439 Int = 0x140;
2440 } else if (Prefix == "row_half_mirror") {
2441 Int = 0x141;
2442 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002443 // Check to prevent parseDPPCtrlOps from eating invalid tokens
2444 if (Prefix != "quad_perm"
2445 && Prefix != "row_shl"
2446 && Prefix != "row_shr"
2447 && Prefix != "row_ror"
2448 && Prefix != "wave_shl"
2449 && Prefix != "wave_rol"
2450 && Prefix != "wave_shr"
2451 && Prefix != "wave_ror"
2452 && Prefix != "row_bcast") {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002453 if (AddDefault) {
2454 Operands.push_back(AMDGPUOperand::CreateImm(0, S, AMDGPUOperand::ImmTyDppCtrl));
2455 return MatchOperand_Success;
2456 } else {
2457 return MatchOperand_NoMatch;
2458 }
Sam Kolton201398e2016-04-21 13:14:24 +00002459 }
2460
Sam Koltona74cd522016-03-18 15:35:51 +00002461 Parser.Lex();
2462 if (getLexer().isNot(AsmToken::Colon))
2463 return MatchOperand_ParseFail;
2464
2465 if (Prefix == "quad_perm") {
2466 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00002467 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00002468 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00002469 return MatchOperand_ParseFail;
2470
2471 Parser.Lex();
2472 if (getLexer().isNot(AsmToken::Integer))
2473 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002474 Int = getLexer().getTok().getIntVal();
Sam Koltondfa29f72016-03-09 12:29:31 +00002475
Sam Koltona74cd522016-03-18 15:35:51 +00002476 Parser.Lex();
2477 if (getLexer().isNot(AsmToken::Comma))
Sam Koltondfa29f72016-03-09 12:29:31 +00002478 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002479 Parser.Lex();
2480 if (getLexer().isNot(AsmToken::Integer))
2481 return MatchOperand_ParseFail;
2482 Int += (getLexer().getTok().getIntVal() << 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00002483
Sam Koltona74cd522016-03-18 15:35:51 +00002484 Parser.Lex();
2485 if (getLexer().isNot(AsmToken::Comma))
2486 return MatchOperand_ParseFail;
2487 Parser.Lex();
2488 if (getLexer().isNot(AsmToken::Integer))
2489 return MatchOperand_ParseFail;
2490 Int += (getLexer().getTok().getIntVal() << 4);
2491
2492 Parser.Lex();
2493 if (getLexer().isNot(AsmToken::Comma))
2494 return MatchOperand_ParseFail;
2495 Parser.Lex();
2496 if (getLexer().isNot(AsmToken::Integer))
2497 return MatchOperand_ParseFail;
2498 Int += (getLexer().getTok().getIntVal() << 6);
2499
2500 Parser.Lex();
2501 if (getLexer().isNot(AsmToken::RBrac))
2502 return MatchOperand_ParseFail;
2503
2504 } else {
2505 // sel:%d
2506 Parser.Lex();
2507 if (getLexer().isNot(AsmToken::Integer))
2508 return MatchOperand_ParseFail;
2509 Int = getLexer().getTok().getIntVal();
2510
2511 if (Prefix == "row_shl") {
2512 Int |= 0x100;
2513 } else if (Prefix == "row_shr") {
2514 Int |= 0x110;
2515 } else if (Prefix == "row_ror") {
2516 Int |= 0x120;
2517 } else if (Prefix == "wave_shl") {
2518 Int = 0x130;
2519 } else if (Prefix == "wave_rol") {
2520 Int = 0x134;
2521 } else if (Prefix == "wave_shr") {
2522 Int = 0x138;
2523 } else if (Prefix == "wave_ror") {
2524 Int = 0x13C;
2525 } else if (Prefix == "row_bcast") {
2526 if (Int == 15) {
2527 Int = 0x142;
2528 } else if (Int == 31) {
2529 Int = 0x143;
2530 }
2531 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002532 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00002533 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002534 }
Sam Koltondfa29f72016-03-09 12:29:31 +00002535 }
Sam Koltona74cd522016-03-18 15:35:51 +00002536 Parser.Lex(); // eat last token
2537
2538 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
Sam Koltondfa29f72016-03-09 12:29:31 +00002539 AMDGPUOperand::ImmTyDppCtrl));
2540 return MatchOperand_Success;
2541}
2542
Sam Kolton5f10a132016-05-06 11:31:17 +00002543AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
2544 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002545}
2546
Sam Kolton5f10a132016-05-06 11:31:17 +00002547AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
2548 return AMDGPUOperand::CreateImm(0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00002549}
2550
Sam Kolton5f10a132016-05-06 11:31:17 +00002551AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
2552 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
2553}
2554
2555void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002556 OptionalImmIndexMap OptionalIdx;
2557
2558 unsigned I = 1;
2559 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2560 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2561 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2562 }
2563
2564 for (unsigned E = Operands.size(); I != E; ++I) {
2565 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2566 // Add the register arguments
Sam Kolton5f10a132016-05-06 11:31:17 +00002567 if (Op.isRegOrImmWithInputMods()) {
2568 // We convert only instructions with modifiers
Sam Koltondfa29f72016-03-09 12:29:31 +00002569 Op.addRegOrImmWithInputModsOperands(Inst, 2);
2570 } else if (Op.isDPPCtrl()) {
2571 Op.addImmOperands(Inst, 1);
2572 } else if (Op.isImm()) {
2573 // Handle optional arguments
2574 OptionalIdx[Op.getImmTy()] = I;
2575 } else {
2576 llvm_unreachable("Invalid operand type");
2577 }
2578 }
2579
2580 // ToDo: fix default values for row_mask and bank_mask
2581 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2582 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2583 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2584}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002585
Sam Kolton3025e7f2016-04-26 13:33:56 +00002586//===----------------------------------------------------------------------===//
2587// sdwa
2588//===----------------------------------------------------------------------===//
2589
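// SDWA operands select sub-dword lanes, e.g. "dst_sel:WORD_1",
// "src0_sel:BYTE_0" and "dst_unused:UNUSED_PRESERVE"; the accepted names are
// those listed in the StringSwitch tables below.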
2590AMDGPUAsmParser::OperandMatchResultTy
2591AMDGPUAsmParser::parseSDWASel(OperandVector &Operands) {
2592 SMLoc S = Parser.getTok().getLoc();
2593 StringRef Value;
2594 AMDGPUAsmParser::OperandMatchResultTy res;
2595
2596 res = parseStringWithPrefix("dst_sel", Value);
2597 if (res == MatchOperand_ParseFail) {
2598 return MatchOperand_ParseFail;
2599 } else if (res == MatchOperand_NoMatch) {
2600 res = parseStringWithPrefix("src0_sel", Value);
2601 if (res == MatchOperand_ParseFail) {
2602 return MatchOperand_ParseFail;
2603 } else if (res == MatchOperand_NoMatch) {
2604 res = parseStringWithPrefix("src1_sel", Value);
2605 if (res != MatchOperand_Success) {
2606 return res;
2607 }
2608 }
2609 }
2610
2611 int64_t Int;
2612 Int = StringSwitch<int64_t>(Value)
2613 .Case("BYTE_0", 0)
2614 .Case("BYTE_1", 1)
2615 .Case("BYTE_2", 2)
2616 .Case("BYTE_3", 3)
2617 .Case("WORD_0", 4)
2618 .Case("WORD_1", 5)
2619 .Case("DWORD", 6)
2620 .Default(0xffffffff);
2621 Parser.Lex(); // eat last token
2622
2623 if (Int == 0xffffffff) {
2624 return MatchOperand_ParseFail;
2625 }
2626
2627 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2628 AMDGPUOperand::ImmTySdwaSel));
2629 return MatchOperand_Success;
2630}
2631
2632AMDGPUAsmParser::OperandMatchResultTy
2633AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
2634 SMLoc S = Parser.getTok().getLoc();
2635 StringRef Value;
2636 AMDGPUAsmParser::OperandMatchResultTy res;
2637
2638 res = parseStringWithPrefix("dst_unused", Value);
2639 if (res != MatchOperand_Success) {
2640 return res;
2641 }
2642
2643 int64_t Int;
2644 Int = StringSwitch<int64_t>(Value)
2645 .Case("UNUSED_PAD", 0)
2646 .Case("UNUSED_SEXT", 1)
2647 .Case("UNUSED_PRESERVE", 2)
2648 .Default(0xffffffff);
2649 Parser.Lex(); // eat last token
2650
2651 if (Int == 0xffffffff) {
2652 return MatchOperand_ParseFail;
2653 }
2654
2655 Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
2656 AMDGPUOperand::ImmTySdwaDstUnused));
2657 return MatchOperand_Success;
2658}
2659
Sam Kolton5f10a132016-05-06 11:31:17 +00002660AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWASel() const {
2661 return AMDGPUOperand::CreateImm(6, SMLoc(), AMDGPUOperand::ImmTySdwaSel);
2662}
2663
2664AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSDWADstUnused() const {
2665 return AMDGPUOperand::CreateImm(0, SMLoc(), AMDGPUOperand::ImmTySdwaDstUnused);
2666}
2667
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002668
Tom Stellard45bb48e2015-06-13 03:28:10 +00002669/// Force static initialization.
2670extern "C" void LLVMInitializeAMDGPUAsmParser() {
2671 RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
2672 RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
2673}
2674
2675#define GET_REGISTER_MATCHER
2676#define GET_MATCHER_IMPLEMENTATION
2677#include "AMDGPUGenAsmMatcher.inc"