//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct OptionalOperand;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64") ||
           Token.endswith("_dpp");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    // Only plain immediates are inlinable (e.g. the "clamp" attribute is not).
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone)
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values, which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

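  // As an illustration of the rule above: literals such as 0.5, -1.0 or 42
  // pass this check and can be encoded as inline constants, while values
  // such as 0.3 or 65 cannot and must be emitted as a 32-bit literal.
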
  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isClamp() const {
    return isImmTy(ImmTyClamp);
  }

  bool isOMod() const {
    return isImmTy(ImmTyOMod);
  }

  bool isImmModifier() const {
    return Kind == Immediate && Imm.Type != ImmTyNone;
  }

  bool isDMask() const {
    return isImmTy(ImmTyDMask);
  }

  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }

  bool isMod() const {
    return isClamp() || isOMod();
  }

  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }

  bool isBankMask() const {
    return isImmTy(ImmTyDppBankMask);
  }

  bool isRowMask() const {
    return isImmTy(ImmTyDppRowMask);
  }

  bool isBoundCtrl() const {
    return isImmTy(ImmTyDppBoundCtrl);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
  }

  bool isSCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
  }

  bool isVCSrc64() const {
    return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

310 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000311 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000312 // narrowed to 32 bits (in the instruction stream). That require knowledge
313 // of instruction type (unsigned/signed, floating or "untyped"/B64),
314 // see [AMD GCN3 ISA 6.3.1].
315 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
316 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000317 }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      if (Imm.Type != AMDGPUOperand::ImmTyNone)
        OS << getImm();
      else
        OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                                    bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  bool AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                           unsigned &RegNum, unsigned &RegWidth);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands);
  OperandMatchResultTy parseDPPOptionalOps(OperandVector &Operands);
  void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

static int getRegClass(RegisterKind Is, unsigned RegWidth) {
  if (Is == IS_VGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  } else if (Is == IS_TTMP) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::TTMP_32RegClassID;
      case 2: return AMDGPU::TTMP_64RegClassID;
    }
  } else if (Is == IS_SGPR) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::SGPR_32RegClassID;
      case 2: return AMDGPU::SGPR_64RegClassID;
      case 4: return AMDGPU::SReg_128RegClassID;
      case 8: return AMDGPU::SReg_256RegClassID;
      case 16: return AMDGPU::SReg_512RegClassID;
    }
  }
  return -1;
}

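// As an illustration of the mapping above: v[4:5] parses as IS_VGPR with
// RegWidth == 2 and selects VReg_64RegClassID, while s[8:11] parses as
// IS_SGPR with RegWidth == 4 and selects SReg_128RegClassID. Widths with no
// case (e.g. 5) fall through to the default and yield -1.
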
static unsigned getSpecialRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("tba", AMDGPU::TBA)
    .Case("tma", AMDGPU::TMA)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Case("tma_lo", AMDGPU::TMA_LO)
    .Case("tma_hi", AMDGPU::TMA_HI)
    .Case("tba_lo", AMDGPU::TBA_LO)
    .Case("tba_hi", AMDGPU::TBA_HI)
    .Default(0);
}

bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  auto R = parseRegister();
  if (!R) return true;
  assert(R->isReg());
  RegNo = R->getReg();
  StartLoc = R->getStartLoc();
  EndLoc = R->getEndLoc();
  return false;
}

bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
                                            RegisterKind RegKind, unsigned Reg1,
                                            unsigned RegNum) {
  switch (RegKind) {
  case IS_SPECIAL:
    if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
    if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
    if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
    return false;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
    if (Reg1 != Reg + RegWidth) { return false; }
    RegWidth++;
    return true;
  default:
    assert(false); return false;
  }
}

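// For example, while parsing the list [s2,s3,s4,s5] each element after the
// first is accepted only if it is the register immediately following the
// range collected so far (Reg1 == Reg + RegWidth). Special registers are
// instead fused pairwise: exec_lo followed by exec_hi becomes EXEC with
// RegWidth == 2.
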
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth) {
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
      else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
      else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
      else { return false; }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY].
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo)) { return false; }

        if (getLexer().isNot(AsmToken::Colon)) { return false; }
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegHi)) { return false; }

        if (getLexer().isNot(AsmToken::RBrac)) { return false; }
        Parser.Lex();

        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
    if (RegWidth != 1) { return false; }
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) { return false; }
        if (RegKind1 != RegKind) { return false; }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0) { return false; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1) { return false; }
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs()) { return false; }
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    assert(false); return false;
  }

  if (!subtargetHasRegister(*TRI, Reg)) { return false; }
  return true;
}

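// Taken together, the forms accepted above are, for example:
//   v0, s101, ttmp4            - a single 32-bit register
//   v[2:5], s[4:7], ttmp[4:5]  - ranges of 4, 4 and 2 dwords respectively
//   [s0,s1,s2,s3]              - an explicit list of consecutive registers
//   vcc, exec, flat_scratch    - special registers and their _lo/_hi halves
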
std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const auto &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  RegisterKind RegKind;
  unsigned Reg, RegNum, RegWidth;

  if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
    return nullptr;
  }
  return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
                                  TRI, &getSTI(), false);
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

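// For example, flat_scratch and its _lo/_hi halves are rejected when
// targeting SI, and on VI any register aliasing SGPR102_SGPR103 is rejected
// because VI exposes only 102 SGPRs.
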
static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double-precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::LBrac:
  case AsmToken::Identifier: {
    if (auto R = parseRegister()) {
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }
      if (Abs2) {
        if (getLexer().isNot(AsmToken::RParen)) {
          return MatchOperand_ParseFail;
        }
        Parser.Lex();
        Modifiers |= 0x2;
      }
      assert(R->isReg());
      R->Reg.IsForcedVOP3 = isForcedVOP3();
      if (Modifiers) {
        R->setModifiers(Modifiers);
      }
      Operands.push_back(std::move(R));
    } else {
      ResTy = parseVOP3OptionalOps(Operands);
      if (ResTy == MatchOperand_NoMatch) {
        const auto &Tok = Parser.getTok();
        Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(),
                                                      Tok.getLoc()));
        Parser.Lex();
      }
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

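// Besides plain registers and literals, parseOperand() accepts source
// modifiers in forms such as:
//   -v0      - negation (modifier bit 0x1)
//   |v1|     - absolute value (modifier bit 0x2), also spelled abs(v1)
//   -|v2|    - both modifiers combined
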
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic.
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

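// For example, with Prefix == "offset" this matches an operand written as
// offset:4096 and stores 4096 in Int; a prefix that is present but not
// followed by ':' and an integer is a hard parse failure rather than a
// soft mismatch.
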
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

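// For example, with Name == "gds" the token "gds" produces an immediate of 1
// and "nogds" produces 0; any other token reports no match.
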
typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
                           OptionalImmIndexMap& OptionalIdx,
                           enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(Default));
  }
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

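// For an instruction using the offset0/offset1 pair (illustratively, a
// ds_write2-style opcode written with offset0:4 offset1:8), the loop above
// records each parsed immediate in OptionalIdx and any omitted optional
// operand is materialized with its default value of 0.
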
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);

  if (!GDSOnly) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
1496
1497
//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

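// Worked example of the packing above (illustrative, not from the source):
// starting from the all-ones default 0xf7f,
//   "s_waitcnt vmcnt(0) & lgkmcnt(0)"
// first clears bits [3:0]  (0xf7f & ~0x00f = 0xf70), then bits [11:8]
// (0xf70 & ~0xf00 = 0x070), so the operand encodes as 0x70 with expcnt
// left untouched.
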
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

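// Note (an assumption based on the token check above): for flat atomics
// "glc" selects the value-returning opcode variant rather than an operand
// bit, so it is matched as part of the instruction string and deliberately
// skipped here instead of being recorded in OptionalIdx.
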
//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}

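// In other words, a MUBUF "offset:" immediate is an unsigned 12-bit byte
// offset, so values 0..4095 are accepted here; larger offsets are typically
// expressed through the soffset register operand instead (a reading of the
// predicate above, not a statement from the original source).
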
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
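
// Consequence of the two predicates above: an offset that fits in 8 bits
// uses the short SMRD encoding everywhere, while a larger 32-bit value only
// matches isSMRDLiteralOffset and therefore only assembles on CI, the one
// target with a literal-offset encoding at this point.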

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

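// Sketch of the omod (output-modifier) encoding the converters above assume:
//   0 = no modifier, 1 = multiply by 2, 2 = multiply by 4, 3 = divide by 2.
// Hence "mul:2" and "mul:4" map through Mul >> 1, "div:2" becomes 3, and
// "div:1" is a no-op encoded as 0.
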
static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
        Src1Op.isRegClass(AMDGPU::SReg_64RegClassID))
      return true;
  }
  return false;
}

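// Rationale for the heuristic above (an assumption, not stated in the
// original): the compact VOP1/VOP2 encodings cannot express input
// modifiers, an SGPR-pair destination (e.g. a carry-out), more than two
// sources, or a scalar src1, so any of those features forces the 64-bit
// VOP3 encoding.
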
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by operandsHaveModifiers may change after parsing an
  // operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operand, so we need to make sure all
      // previous register and immediate operands have modifiers too.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
}

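// Note (a sketch of the operand layout assumed here): for VOP3 each source
// with input modifiers occupies two MCInst operands, a src_modifiers word
// (neg/abs bits) followed by the value itself, which is why
// addRegOrImmWithInputModsOperands is invoked with a count of 2 above.
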
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

//===----------------------------------------------------------------------===//
// dpp
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isDPPCtrl() const {
  bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
  if (result) {
    int64_t Imm = getImm();
    return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
           ((Imm >= 0x101) && (Imm <= 0x10f)) ||
           ((Imm >= 0x111) && (Imm <= 0x11f)) ||
           ((Imm >= 0x121) && (Imm <= 0x12f)) ||
           (Imm == 0x130) ||
           (Imm == 0x134) ||
           (Imm == 0x138) ||
           (Imm == 0x13c) ||
           (Imm == 0x140) ||
           (Imm == 0x141) ||
           (Imm == 0x142) ||
           (Imm == 0x143);
  }
  return false;
}

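// Decode map for the ranges accepted above (mirrors parseDPPCtrlOps below):
//   0x000-0x0ff  quad_perm          0x130  wave_shl    0x140  row_mirror
//   0x101-0x10f  row_shl:1-15       0x134  wave_rol    0x141  row_half_mirror
//   0x111-0x11f  row_shr:1-15       0x138  wave_shr    0x142  row_bcast:15
//   0x121-0x12f  row_ror:1-15       0x13c  wave_ror    0x143  row_bcast:31
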
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 2);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 4);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 6);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;

    } else {
      // sel:%d
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      if (Prefix == "row_shl") {
        Int |= 0x100;
      } else if (Prefix == "row_shr") {
        Int |= 0x110;
      } else if (Prefix == "row_ror") {
        Int |= 0x120;
      } else if (Prefix == "wave_shl") {
        Int = 0x130;
      } else if (Prefix == "wave_rol") {
        Int = 0x134;
      } else if (Prefix == "wave_shr") {
        Int = 0x138;
      } else if (Prefix == "wave_ror") {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          // Any other row_bcast operand would fall into the quad_perm range
          // and be wrongly accepted by isDPPCtrl, so reject it here.
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }
  Parser.Lex(); // eat last token

  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}

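// Worked example (illustrative): "quad_perm:[0,1,2,3]" packs each 2-bit lane
// select into successively higher bit pairs,
//   0 | (1 << 2) | (2 << 4) | (3 << 6) = 0xe4,
// which falls in the 0x000-0x0ff quad_perm range checked by isDPPCtrl.
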
static const OptionalOperand DPPOptionalOps [] = {
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPOptionalOps(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  OperandMatchResultTy Res = parseOptionalOps(DPPOptionalOps, Operands);
  // XXX - sp3 uses the syntax "bound_ctrl:0" to indicate that the bound_ctrl
  // bit is set.
  if (Res == MatchOperand_Success) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands.back());
    // If the last operand was parsed as bound_ctrl, replace it with the
    // correct value (1).
    if (Op.isImmTy(AMDGPUOperand::ImmTyDppBoundCtrl)) {
      Operands.pop_back();
      Operands.push_back(
          AMDGPUOperand::CreateImm(1, S, AMDGPUOperand::ImmTyDppBoundCtrl));
      return MatchOperand_Success;
    }
  }
  return Res;
}

void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, true);
}

void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, false);
}

void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
                             bool HasMods) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (!HasMods && Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (HasMods && Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // ToDo: fix default values for row_mask and bank_mask
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"