blob: 6277ff371ba05e6571fec2886ffe866fd17eb23d [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000016#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "llvm/ADT/STLExtras.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000018#include "llvm/ADT/SmallString.h"
19#include "llvm/ADT/SmallVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/StringSwitch.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCParser/MCAsmLexer.h"
27#include "llvm/MC/MCParser/MCAsmParser.h"
28#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000029#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000033#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000034#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/Support/SourceMgr.h"
37#include "llvm/Support/TargetRegistry.h"
38#include "llvm/Support/raw_ostream.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039
40using namespace llvm;
41
42namespace {
43
44struct OptionalOperand;
45
46class AMDGPUOperand : public MCParsedAsmOperand {
47 enum KindTy {
48 Token,
49 Immediate,
50 Register,
51 Expression
52 } Kind;
53
54 SMLoc StartLoc, EndLoc;
55
56public:
57 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
58
59 MCContext *Ctx;
60
61 enum ImmTy {
62 ImmTyNone,
63 ImmTyDSOffset0,
64 ImmTyDSOffset1,
65 ImmTyGDS,
66 ImmTyOffset,
67 ImmTyGLC,
68 ImmTySLC,
69 ImmTyTFE,
70 ImmTyClamp,
Nikolay Haustov2f684f12016-02-26 09:51:05 +000071 ImmTyOMod,
72 ImmTyDMask,
73 ImmTyUNorm,
74 ImmTyDA,
75 ImmTyR128,
76 ImmTyLWE,
Tom Stellard45bb48e2015-06-13 03:28:10 +000077 };
78
79 struct TokOp {
80 const char *Data;
81 unsigned Length;
82 };
83
84 struct ImmOp {
85 bool IsFPImm;
86 ImmTy Type;
87 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +000088 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +000089 };
90
91 struct RegOp {
92 unsigned RegNo;
93 int Modifiers;
94 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +000095 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +000096 bool IsForcedVOP3;
97 };
98
99 union {
100 TokOp Tok;
101 ImmOp Imm;
102 RegOp Reg;
103 const MCExpr *Expr;
104 };
105
106 void addImmOperands(MCInst &Inst, unsigned N) const {
107 Inst.addOperand(MCOperand::createImm(getImm()));
108 }
109
110 StringRef getToken() const {
111 return StringRef(Tok.Data, Tok.Length);
112 }
113
114 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000115 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000116 }
117
118 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000119 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000120 addRegOperands(Inst, N);
121 else
122 addImmOperands(Inst, N);
123 }
124
Tom Stellardd93a34f2016-02-22 19:17:56 +0000125 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
126 if (isRegKind()) {
127 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
128 addRegOperands(Inst, N);
129 } else {
130 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
131 addImmOperands(Inst, N);
132 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000133 }
134
135 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
136 if (isImm())
137 addImmOperands(Inst, N);
138 else {
139 assert(isExpr());
140 Inst.addOperand(MCOperand::createExpr(Expr));
141 }
142 }
143
144 bool defaultTokenHasSuffix() const {
145 StringRef Token(Tok.Data, Tok.Length);
146
147 return Token.endswith("_e32") || Token.endswith("_e64");
148 }
149
150 bool isToken() const override {
151 return Kind == Token;
152 }
153
154 bool isImm() const override {
155 return Kind == Immediate;
156 }
157
Tom Stellardd93a34f2016-02-22 19:17:56 +0000158 bool isInlinableImm() const {
159 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
160 immediates are inlinable (e.g. "clamp" attribute is not) */ )
161 return false;
162 // TODO: We should avoid using host float here. It would be better to
163 // check the float bit values which is what a few other places do.
164 // We've had bot failures before due to weird NaN support on mips hosts.
165 const float F = BitsToFloat(Imm.Val);
166 // TODO: Add 1/(2*pi) for VI
167 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000168 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000169 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000170 }
171
172 bool isDSOffset0() const {
173 assert(isImm());
174 return Imm.Type == ImmTyDSOffset0;
175 }
176
177 bool isDSOffset1() const {
178 assert(isImm());
179 return Imm.Type == ImmTyDSOffset1;
180 }
181
182 int64_t getImm() const {
183 return Imm.Val;
184 }
185
186 enum ImmTy getImmTy() const {
187 assert(isImm());
188 return Imm.Type;
189 }
190
191 bool isRegKind() const {
192 return Kind == Register;
193 }
194
195 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000196 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000197 }
198
Tom Stellardd93a34f2016-02-22 19:17:56 +0000199 bool isRegOrImmWithInputMods() const {
200 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000201 }
202
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000203 bool isImmTy(ImmTy ImmT) const {
204 return isImm() && Imm.Type == ImmT;
205 }
206
Tom Stellarda90b9522016-02-11 03:28:15 +0000207 bool isClamp() const {
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000208 return isImmTy(ImmTyClamp);
Tom Stellarda90b9522016-02-11 03:28:15 +0000209 }
210
211 bool isOMod() const {
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000212 return isImmTy(ImmTyOMod);
Tom Stellarda90b9522016-02-11 03:28:15 +0000213 }
214
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000215 bool isImmModifier() const {
216 return Kind == Immediate && Imm.Type != ImmTyNone;
217 }
218
219 bool isDMask() const {
220 return isImmTy(ImmTyDMask);
221 }
222
223 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
224 bool isDA() const { return isImmTy(ImmTyDA); }
225 bool isR128() const { return isImmTy(ImmTyUNorm); }
226 bool isLWE() const { return isImmTy(ImmTyLWE); }
227
Tom Stellarda90b9522016-02-11 03:28:15 +0000228 bool isMod() const {
229 return isClamp() || isOMod();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000230 }
231
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000232 bool isGDS() const { return isImmTy(ImmTyGDS); }
233 bool isGLC() const { return isImmTy(ImmTyGLC); }
234 bool isSLC() const { return isImmTy(ImmTySLC); }
235 bool isTFE() const { return isImmTy(ImmTyTFE); }
236
Tom Stellard45bb48e2015-06-13 03:28:10 +0000237 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000238 assert(isReg() || (isImm() && Imm.Modifiers == 0));
239 if (isReg())
240 Reg.Modifiers = Mods;
241 else
242 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000243 }
244
245 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000246 assert(isRegKind() || isImm());
247 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000248 }
249
250 unsigned getReg() const override {
251 return Reg.RegNo;
252 }
253
254 bool isRegOrImm() const {
255 return isReg() || isImm();
256 }
257
258 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000259 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000260 }
261
262 bool isSCSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000263 return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000264 }
265
Matt Arsenault86d336e2015-09-08 21:15:00 +0000266 bool isSCSrc64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000267 return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
268 }
269
270 bool isSSrc32() const {
271 return isImm() || isSCSrc32();
272 }
273
274 bool isSSrc64() const {
275 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
276 // See isVSrc64().
277 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000278 }
279
Tom Stellard45bb48e2015-06-13 03:28:10 +0000280 bool isVCSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000281 return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000282 }
283
284 bool isVCSrc64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000285 return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000286 }
287
288 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000289 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000290 }
291
292 bool isVSrc64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000293 // TODO: Check if the 64-bit value (coming from assembly source) can be
294 // narrowed to 32 bits (in the instruction stream). That require knowledge
295 // of instruction type (unsigned/signed, floating or "untyped"/B64),
296 // see [AMD GCN3 ISA 6.3.1].
297 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
298 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000299 }
300
301 bool isMem() const override {
302 return false;
303 }
304
305 bool isExpr() const {
306 return Kind == Expression;
307 }
308
309 bool isSoppBrTarget() const {
310 return isExpr() || isImm();
311 }
312
313 SMLoc getStartLoc() const override {
314 return StartLoc;
315 }
316
317 SMLoc getEndLoc() const override {
318 return EndLoc;
319 }
320
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000321 void print(raw_ostream &OS) const override {
322 switch (Kind) {
323 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000324 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000325 break;
326 case Immediate:
Tom Stellardd93a34f2016-02-22 19:17:56 +0000327 if (Imm.Type != AMDGPUOperand::ImmTyNone)
328 OS << getImm();
329 else
330 OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000331 break;
332 case Token:
333 OS << '\'' << getToken() << '\'';
334 break;
335 case Expression:
336 OS << "<expr " << *Expr << '>';
337 break;
338 }
339 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000340
341 static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
342 enum ImmTy Type = ImmTyNone,
343 bool IsFPImm = false) {
344 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
345 Op->Imm.Val = Val;
346 Op->Imm.IsFPImm = IsFPImm;
347 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000348 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000349 Op->StartLoc = Loc;
350 Op->EndLoc = Loc;
351 return Op;
352 }
353
354 static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
355 bool HasExplicitEncodingSize = true) {
356 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
357 Res->Tok.Data = Str.data();
358 Res->Tok.Length = Str.size();
359 Res->StartLoc = Loc;
360 Res->EndLoc = Loc;
361 return Res;
362 }
363
364 static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
365 SMLoc E,
366 const MCRegisterInfo *TRI,
Tom Stellard2b65ed32015-12-21 18:44:27 +0000367 const MCSubtargetInfo *STI,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000368 bool ForceVOP3) {
369 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
370 Op->Reg.RegNo = RegNo;
371 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000372 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000373 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000374 Op->Reg.IsForcedVOP3 = ForceVOP3;
375 Op->StartLoc = S;
376 Op->EndLoc = E;
377 return Op;
378 }
379
380 static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
381 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
382 Op->Expr = Expr;
383 Op->StartLoc = S;
384 Op->EndLoc = S;
385 return Op;
386 }
387
388 bool isDSOffset() const;
389 bool isDSOffset01() const;
390 bool isSWaitCnt() const;
391 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000392 bool isSMRDOffset() const;
393 bool isSMRDLiteralOffset() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000394};
395
396class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000397 const MCInstrInfo &MII;
398 MCAsmParser &Parser;
399
400 unsigned ForcedEncodingSize;
Matt Arsenault68802d32015-11-05 03:11:27 +0000401
Matt Arsenault3b159672015-12-01 20:31:08 +0000402 bool isSI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000403 return AMDGPU::isSI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000404 }
405
406 bool isCI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000407 return AMDGPU::isCI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000408 }
409
Matt Arsenault68802d32015-11-05 03:11:27 +0000410 bool isVI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000411 return AMDGPU::isVI(getSTI());
Matt Arsenault68802d32015-11-05 03:11:27 +0000412 }
413
414 bool hasSGPR102_SGPR103() const {
415 return !isVI();
416 }
417
Tom Stellard45bb48e2015-06-13 03:28:10 +0000418 /// @name Auto-generated Match Functions
419 /// {
420
421#define GET_ASSEMBLER_HEADER
422#include "AMDGPUGenAsmMatcher.inc"
423
424 /// }
425
Tom Stellard347ac792015-06-26 21:15:07 +0000426private:
427 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
428 bool ParseDirectiveHSACodeObjectVersion();
429 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000430 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
431 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000432 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000433 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000434 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000435 bool ParseDirectiveAMDGPUHsaModuleGlobal();
436 bool ParseDirectiveAMDGPUHsaProgramGlobal();
437 bool ParseSectionDirectiveHSADataGlobalAgent();
438 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000439 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Tom Stellard347ac792015-06-26 21:15:07 +0000440
Tom Stellard45bb48e2015-06-13 03:28:10 +0000441public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000442public:
443 enum AMDGPUMatchResultTy {
444 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
445 };
446
Akira Hatanakab11ef082015-11-14 06:35:56 +0000447 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000448 const MCInstrInfo &MII,
449 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000450 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Matt Arsenault68802d32015-11-05 03:11:27 +0000451 ForcedEncodingSize(0) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000452 MCAsmParserExtension::Initialize(Parser);
453
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000454 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000455 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000456 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000457 }
458
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000459 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000460 }
461
Tom Stellard347ac792015-06-26 21:15:07 +0000462 AMDGPUTargetStreamer &getTargetStreamer() {
463 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
464 return static_cast<AMDGPUTargetStreamer &>(TS);
465 }
466
Tom Stellard45bb48e2015-06-13 03:28:10 +0000467 unsigned getForcedEncodingSize() const {
468 return ForcedEncodingSize;
469 }
470
471 void setForcedEncodingSize(unsigned Size) {
472 ForcedEncodingSize = Size;
473 }
474
475 bool isForcedVOP3() const {
476 return ForcedEncodingSize == 64;
477 }
478
479 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
480 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
481 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
482 OperandVector &Operands, MCStreamer &Out,
483 uint64_t &ErrorInfo,
484 bool MatchingInlineAsm) override;
485 bool ParseDirective(AsmToken DirectiveID) override;
486 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
487 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
488 SMLoc NameLoc, OperandVector &Operands) override;
489
490 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
491 int64_t Default = 0);
492 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
493 OperandVector &Operands,
494 enum AMDGPUOperand::ImmTy ImmTy =
495 AMDGPUOperand::ImmTyNone);
496 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
497 enum AMDGPUOperand::ImmTy ImmTy =
498 AMDGPUOperand::ImmTyNone);
499 OperandMatchResultTy parseOptionalOps(
500 const ArrayRef<OptionalOperand> &OptionalOps,
501 OperandVector &Operands);
502
503
504 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
505 void cvtDS(MCInst &Inst, const OperandVector &Operands);
506 OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
507 OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
508 OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);
509
510 bool parseCnt(int64_t &IntVal);
511 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
512 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
513
514 OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
515 OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
516 void cvtFlat(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +0000517 void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000518
519 void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
520 OperandMatchResultTy parseOffset(OperandVector &Operands);
521 OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
522 OperandMatchResultTy parseGLC(OperandVector &Operands);
523 OperandMatchResultTy parseSLC(OperandVector &Operands);
524 OperandMatchResultTy parseTFE(OperandVector &Operands);
525
526 OperandMatchResultTy parseDMask(OperandVector &Operands);
527 OperandMatchResultTy parseUNorm(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000528 OperandMatchResultTy parseDA(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000529 OperandMatchResultTy parseR128(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000530 OperandMatchResultTy parseLWE(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000531
Tom Stellarda90b9522016-02-11 03:28:15 +0000532 void cvtId(MCInst &Inst, const OperandVector &Operands);
533 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
534 void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
535 void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000536 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000537
538 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000539 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000540 OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
541};
542
// Table entry describing one optional operand a parse*OptionalOps routine
// may accept (e.g. "offset:16" or the bare flag "gds").
struct OptionalOperand {
  const char *Name;                 // Keyword introducing the operand.
  AMDGPUOperand::ImmTy Type;        // ImmTy recorded on the created operand.
  bool IsBit;                       // True if it is a single-bit flag (no value).
  int64_t Default;                  // Value used when the operand is absent.
  bool (*ConvertResult)(int64_t&);  // Optional in-place value conversion; may be null.
};
550
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000551}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000552
Matt Arsenault967c2f52015-11-03 22:50:32 +0000553static int getRegClass(bool IsVgpr, unsigned RegWidth) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000554 if (IsVgpr) {
555 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000556 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000557 case 1: return AMDGPU::VGPR_32RegClassID;
558 case 2: return AMDGPU::VReg_64RegClassID;
559 case 3: return AMDGPU::VReg_96RegClassID;
560 case 4: return AMDGPU::VReg_128RegClassID;
561 case 8: return AMDGPU::VReg_256RegClassID;
562 case 16: return AMDGPU::VReg_512RegClassID;
563 }
564 }
565
566 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000567 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000568 case 1: return AMDGPU::SGPR_32RegClassID;
569 case 2: return AMDGPU::SGPR_64RegClassID;
570 case 4: return AMDGPU::SReg_128RegClassID;
571 case 8: return AMDGPU::SReg_256RegClassID;
572 case 16: return AMDGPU::SReg_512RegClassID;
573 }
574}
575
Craig Topper4e9b03d62015-09-21 00:18:00 +0000576static unsigned getRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000577
578 return StringSwitch<unsigned>(RegName)
579 .Case("exec", AMDGPU::EXEC)
580 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000581 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000582 .Case("m0", AMDGPU::M0)
583 .Case("scc", AMDGPU::SCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000584 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
585 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000586 .Case("vcc_lo", AMDGPU::VCC_LO)
587 .Case("vcc_hi", AMDGPU::VCC_HI)
588 .Case("exec_lo", AMDGPU::EXEC_LO)
589 .Case("exec_hi", AMDGPU::EXEC_HI)
590 .Default(0);
591}
592
// Parse a register reference. Accepts special names (exec, vcc, ...),
// single registers (v0, s7), and ranges (v[8:11], s[0:3]). Returns true on
// failure (MCTargetAsmParser convention). On success RegNo holds the MC
// register and the consumed tokens are lexed away.
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    // Even a recognized special register may not exist on this subtarget
    // (e.g. flat_scratch aliases differ); invert because true == error.
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register: "v<N>"/"s<N>" with the index glued on.
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register greater than 32-bits: "v[<lo>:<hi>]"/"s[<lo>:<hi>]".

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  // Translate (bank, width, index) into an actual MC register.
  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}
668
669unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
670
671 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
672
673 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
674 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
675 return Match_InvalidOperand;
676
Tom Stellard88e0b252015-10-06 15:57:53 +0000677 if ((TSFlags & SIInstrFlags::VOP3) &&
678 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
679 getForcedEncodingSize() != 64)
680 return Match_PreferE32;
681
Tom Stellard45bb48e2015-06-13 03:28:10 +0000682 return Match_Success;
683}
684
685
// Run the generated matcher on the parsed operand list; emit the MCInst on
// success, otherwise report a diagnostic anchored at the offending operand.
// Returns true on error (MCTargetAsmParser convention).
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // ErrorInfo indexes the bad operand; ~0ULL means "unknown operand".
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
723
// Parse the "<major>, <minor>" integer pair shared by the HSA code-object
// directives. Returns true (error) without consuming the bad token, so the
// caller's TokError messages point at it.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}
744
745bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
746
747 uint32_t Major;
748 uint32_t Minor;
749
750 if (ParseDirectiveMajorMinor(Major, Minor))
751 return true;
752
753 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
754 return false;
755}
756
// Handle ".hsa_code_object_isa [major, minor, stepping, "vendor", "arch"]".
// With no arguments the ISA version of the targeted GPU is emitted; with
// arguments all five fields are required, in order. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  // getStringContents() strips the surrounding quotes.
  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
813
Tom Stellardff7416b2015-06-26 21:58:31 +0000814bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
815 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +0000816 SmallString<40> ErrStr;
817 raw_svector_ostream Err(ErrStr);
818 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
819 return TokError(Err.str());
820 }
Tom Stellardff7416b2015-06-26 21:58:31 +0000821 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +0000822 return false;
823}
824
// Handle ".amd_kernel_code_t": initialize a header with subtarget defaults,
// then consume "key = value" lines until ".end_amd_kernel_code_t" and emit
// the completed header. Returns true on error.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Each value must start its own statement.
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
857
Tom Stellarde135ffd2015-09-25 21:41:28 +0000858bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
859 getParser().getStreamer().SwitchSection(
860 AMDGPU::getHSATextSection(getContext()));
861 return false;
862}
863
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000864bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
865 if (getLexer().isNot(AsmToken::Identifier))
866 return TokError("expected symbol name");
867
868 StringRef KernelName = Parser.getTok().getString();
869
870 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
871 ELF::STT_AMDGPU_HSA_KERNEL);
872 Lex();
873 return false;
874}
875
Tom Stellard00f2f912015-12-02 19:47:57 +0000876bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
877 if (getLexer().isNot(AsmToken::Identifier))
878 return TokError("expected symbol name");
879
880 StringRef GlobalName = Parser.getTok().getIdentifier();
881
882 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
883 Lex();
884 return false;
885}
886
887bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
888 if (getLexer().isNot(AsmToken::Identifier))
889 return TokError("expected symbol name");
890
891 StringRef GlobalName = Parser.getTok().getIdentifier();
892
893 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
894 Lex();
895 return false;
896}
897
898bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
899 getParser().getStreamer().SwitchSection(
900 AMDGPU::getHSADataGlobalAgentSection(getContext()));
901 return false;
902}
903
904bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
905 getParser().getStreamer().SwitchSection(
906 AMDGPU::getHSADataGlobalProgramSection(getContext()));
907 return false;
908}
909
Tom Stellard9760f032015-12-03 03:34:32 +0000910bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
911 getParser().getStreamer().SwitchSection(
912 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
913 return false;
914}
915
// Target hook: dispatch AMDGPU/HSA-specific assembler directives.
// Returns false when the directive was handled; true lets the generic
// parser deal with (or reject) it.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  // NOTE(review): this also remaps the generic ".text" directive into the
  // HSA text section — confirm that is intended for non-HSA targets.
  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  // Not one of ours.
  return true;
}
951
Matt Arsenault68802d32015-11-05 03:11:27 +0000952bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
953 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +0000954 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +0000955 return true;
956
Matt Arsenault3b159672015-12-01 20:31:08 +0000957 if (isSI()) {
958 // No flat_scr
959 switch (RegNo) {
960 case AMDGPU::FLAT_SCR:
961 case AMDGPU::FLAT_SCR_LO:
962 case AMDGPU::FLAT_SCR_HI:
963 return false;
964 default:
965 return true;
966 }
967 }
968
Matt Arsenault68802d32015-11-05 03:11:27 +0000969 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
970 // SI/CI have.
971 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
972 R.isValid(); ++R) {
973 if (*R == RegNo)
974 return false;
975 }
976
977 return true;
978}
979
Tom Stellard45bb48e2015-06-13 03:28:10 +0000980static bool operandsHaveModifiers(const OperandVector &Operands) {
981
982 for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
983 const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
984 if (Op.isRegKind() && Op.hasModifiers())
985 return true;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000986 if (Op.isImm() && Op.hasModifiers())
987 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000988 if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
989 Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
990 return true;
991 }
992 return false;
993}
994
// Parse one instruction operand: first via the table-generated custom
// parsers, then falling back to generic handling of integer / float
// immediates (with optional leading '-' and '|...|' abs bars), registers,
// and bare tokens.  Lexer state is consumed as operands are recognized, so
// statement order here is significant.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Optional source modifiers: '-' negates, '|expr|' takes absolute value.
  bool Negate = false, Abs = false;
  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      // Inline constants are at most 32 bits wide.
      if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precisions floating-point
      // value is used.  I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      // Reinterpret the parsed bits as a double, narrow to float, and store
      // the float's bit pattern as the immediate.
      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::Identifier: {
      SMLoc S, E;
      unsigned RegNo;
      // ParseRegister returns false on success.
      if (!ParseRegister(RegNo, S, E)) {
        // Encode the parsed modifiers: bit 0 = neg, bit 1 = abs.
        unsigned Modifiers = 0;

        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          // An opening '|' must be matched by a closing one.
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }

        Operands.push_back(AMDGPUOperand::CreateReg(
            RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
            isForcedVOP3()));

        if (Modifiers) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
          RegOp.setModifiers(Modifiers);
        }
      } else {
        // Not a register: try VOP3 optional operands (clamp/omod), otherwise
        // keep the identifier as a plain token operand.
        ResTy = parseVOP3OptionalOps(Operands);
        if (ResTy == MatchOperand_NoMatch) {
          Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                        S));
          Parser.Lex();
        }
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}
1091
// Parse a full instruction: record any _e32/_e64 encoding suffix, push the
// mnemonic as the first operand, then parse operands until end of statement.
// Returns true (with an Error report) on failure.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  // The _e64/_e32 suffixes force the 64-bit (VOP3) or 32-bit encoding.
  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                                "failed parsing operand.");
      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                              "not a valid operand.");
    }
  }

  return false;
}
1125
1126//===----------------------------------------------------------------------===//
1127// Utility functions
1128//===----------------------------------------------------------------------===//
1129
// Parse an integer operand written as "<Prefix>:<integer>" into Int.
// At end of statement, Int is set to Default and Success is returned
// (used when appending default values for omitted optional operands).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Identifier: {
      StringRef OffsetName = Parser.getTok().getString();
      // The identifier must be exactly the expected prefix.
      if (!OffsetName.equals(Prefix))
        return MatchOperand_NoMatch;

      // After the prefix we require ':' then an integer expression.
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Colon))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;

      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;
      break;
    }
  }
  return MatchOperand_Success;
}
1163
1164AMDGPUAsmParser::OperandMatchResultTy
1165AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
1166 enum AMDGPUOperand::ImmTy ImmTy) {
1167
1168 SMLoc S = Parser.getTok().getLoc();
1169 int64_t Offset = 0;
1170
1171 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
1172 if (Res != MatchOperand_Success)
1173 return Res;
1174
1175 Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
1176 return MatchOperand_Success;
1177}
1178
// Parse a named single-bit flag operand: the bare token "<Name>" sets the
// bit to 1, "no<Name>" sets it to 0, end-of-statement yields the default 0.
// The resulting bit is appended to Operands as an immediate of type ImmTy.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
      case AsmToken::Identifier: {
        StringRef Tok = Parser.getTok().getString();
        if (Tok == Name) {
          Bit = 1;
          Parser.Lex();
        } else if (Tok.startswith("no") && Tok.endswith(Name)) {
          // e.g. "noglc" explicitly clears the bit.
          Bit = 0;
          Parser.Lex();
        } else {
          return MatchOperand_NoMatch;
        }
        break;
      }
      default:
        return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
1210
// Maps an optional-immediate type to the index (in the parsed OperandVector)
// where that operand was seen.
typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

// Append the optional immediate of type ImmT to Inst: use the parsed operand
// recorded in OptionalIdx if present, otherwise emit the default value 0.
void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    // Operand was not written in the source; use an implicit 0.
    Inst.addOperand(MCOperand::createImm(0));
  }
}
1222
Tom Stellard45bb48e2015-06-13 03:28:10 +00001223static bool operandsHasOptionalOp(const OperandVector &Operands,
1224 const OptionalOperand &OOp) {
1225 for (unsigned i = 0; i < Operands.size(); i++) {
1226 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1227 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1228 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1229 return true;
1230
1231 }
1232 return false;
1233}
1234
// Try each optional operand in OptionalOps (skipping ones already parsed).
// Bit flags go through parseNamedBit; valued operands through
// parseIntWithPrefix, then an optional ConvertResult re-encoding.  A valued
// operand equal to its default is not pushed (the converter still validates
// it).  Returns Success/ParseFail after the first operand that matches,
// NoMatch if none do.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    // Each optional operand may appear at most once.
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    // Record before conversion: the default check is on the source value.
    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1272
1273//===----------------------------------------------------------------------===//
1274// ds
1275//===----------------------------------------------------------------------===//
1276
// Optional operands for single-offset DS instructions.
// Fields: {name, immediate type, is-bit-flag, default, result converter}.
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

// Variant for DS instructions with two independent offset fields
// (offset0/offset1) plus the gds flag.
static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};
1287
// Parse the optional operands of a single-offset DS instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}
// Parse the optional operands of a dual-offset (offset0/offset1) DS
// instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}
1296
1297AMDGPUAsmParser::OperandMatchResultTy
1298AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
1299 SMLoc S = Parser.getTok().getLoc();
1300 AMDGPUAsmParser::OperandMatchResultTy Res =
1301 parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
1302 if (Res == MatchOperand_NoMatch) {
1303 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
1304 AMDGPUOperand::ImmTyOffset));
1305 Res = MatchOperand_Success;
1306 }
1307 return Res;
1308}
1309
1310bool AMDGPUOperand::isDSOffset() const {
1311 return isImm() && isUInt<16>(getImm());
1312}
1313
1314bool AMDGPUOperand::isDSOffset01() const {
1315 return isImm() && isUInt<8>(getImm());
1316}
1317
// Convert the parsed operands of a dual-offset DS instruction into MCInst
// operands: registers in order, then offset0/offset1/gds (defaulting to 0),
// then the implicit m0 register.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
1342
1343void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1344
1345 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1346 bool GDSOnly = false;
1347
1348 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1349 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1350
1351 // Add the register arguments
1352 if (Op.isReg()) {
1353 Op.addRegOperands(Inst, 1);
1354 continue;
1355 }
1356
1357 if (Op.isToken() && Op.getToken() == "gds") {
1358 GDSOnly = true;
1359 continue;
1360 }
1361
1362 // Handle optional arguments
1363 OptionalIdx[Op.getImmTy()] = i;
1364 }
1365
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001366 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1367 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001368
1369 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001370 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001371 }
1372 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1373}
1374
1375
1376//===----------------------------------------------------------------------===//
1377// s_waitcnt
1378//===----------------------------------------------------------------------===//
1379
// Parse one "name(value)" term of an s_waitcnt expression and merge it into
// IntVal.  Recognized counters and their bit fields in the encoded operand:
//   vmcnt   -> [3:0]   (mask 0xf, shift 0)
//   expcnt  -> [6:4]   (mask 0x7, shift 4)
//   lgkmcnt -> [11:8]  (mask 0xf, shift 8)
// A trailing '&' or ',' separator is consumed.  Returns true on error.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Terms may be joined with '&' or ','.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  // Clear this counter's field, then install the new value.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
1422
// Parse the s_waitcnt operand: either a raw integer, or a sequence of
// "name(value)" terms folded into the encoding by parseCnt.  Counters not
// mentioned keep their all-ones ("disabled") value from the 0xf7f default.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(CntVal))
        return MatchOperand_ParseFail;
      break;

    case AsmToken::Identifier:
      // One or more named-counter terms up to end of statement.
      do {
        if (parseCnt(CntVal))
          return MatchOperand_ParseFail;
      } while(getLexer().isNot(AsmToken::EndOfStatement));
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}
1450
// Any immediate (raw or assembled from named counters) is a valid s_waitcnt
// operand.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
1454
1455//===----------------------------------------------------------------------===//
1456// sopp branch targets
1457//===----------------------------------------------------------------------===//
1458
// Parse a SOPP branch target: either an absolute integer immediate or a
// label, which becomes a symbol-reference expression operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      // Branch to a label: create (or reuse) the symbol and reference it.
      Operands.push_back(AMDGPUOperand::CreateExpr(
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
                                  Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
1481
1482//===----------------------------------------------------------------------===//
1483// flat
1484//===----------------------------------------------------------------------===//
1485
// Optional bit flags for flat memory instructions.
// Fields: {name, immediate type, is-bit-flag, default, result converter}.
static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

// Flat atomics omit "glc" here: it is hard-coded in their asm string and
// handled as a token (see cvtFlatAtomic).
static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1496
// Parse the optional operands (glc/slc/tfe) of a flat instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

// Parse the optional operands (slc/tfe) of a flat atomic instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}
1506
// Convert parsed flat-instruction operands to MCInst operands: registers in
// order, then the glc/slc/tfe immediates (defaulting to 0 when omitted).
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Remember where each optional immediate was parsed.
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
1526
1527
// Convert parsed flat-atomic operands to MCInst operands.  Unlike cvtFlat,
// "glc" is baked into the asm string (parsed as a token, skipped here) and
// only slc/tfe become immediate operands.
void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
1552
1553//===----------------------------------------------------------------------===//
1554// mubuf
1555//===----------------------------------------------------------------------===//
1556
// Optional operands for MUBUF instructions: a byte offset plus the
// glc/slc/tfe bit flags.
// Fields: {name, immediate type, is-bit-flag, default, result converter}.
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1563
// Parse the full optional-operand set of a MUBUF instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

// Parse a standalone "offset:<n>" operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

// Parse the "glc"/"noglc" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

// Parse the "slc"/"noslc" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

// Parse the "tfe"/"notfe" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}
1588
1589bool AMDGPUOperand::isMubufOffset() const {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001590 return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
Tom Stellard45bb48e2015-06-13 03:28:10 +00001591}
1592
// Convert parsed MUBUF operands to MCInst operands: registers and an
// immediate soffset in order, hard-coded tokens (e.g. 'offen') skipped,
// then offset/glc/slc/tfe immediates defaulting to 0.
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
1628
1629//===----------------------------------------------------------------------===//
1630// mimg
1631//===----------------------------------------------------------------------===//
1632
// Parse the MIMG "dmask:<n>" operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

// Parse the MIMG "unorm" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

// Parse the MIMG "da" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

// Parse the MIMG "r128" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

// Parse the MIMG "lwe" bit flag.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
1657
1658//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00001659// smrd
1660//===----------------------------------------------------------------------===//
1661
1662bool AMDGPUOperand::isSMRDOffset() const {
1663
1664 // FIXME: Support 20-bit offsets on VI. We need to to pass subtarget
1665 // information here.
1666 return isImm() && isUInt<8>(getImm());
1667}
1668
1669bool AMDGPUOperand::isSMRDLiteralOffset() const {
1670 // 32-bit literals are only supported on CI and we only want to use them
1671 // when the offset is > 8-bits.
1672 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
1673}
1674
1675//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00001676// vop3
1677//===----------------------------------------------------------------------===//
1678
// Re-encode an omod "mul:N" source value into its hardware field:
// 1 -> 0, 2 -> 1, 4 -> 2.  Any other multiplier is rejected.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1: Mul = 0; return true;
  case 2: Mul = 1; return true;
  case 4: Mul = 2; return true;
  default: return false;
  }
}

// Re-encode an omod "div:N" source value into its hardware field:
// 1 -> 0, 2 -> 3.  Any other divisor is rejected.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1: Div = 0; return true;
  case 2: Div = 3; return true;
  default: return false;
  }
}
1700
// VOP3 optional operands: the output "clamp" flag and the output modifier
// (omod), written as "mul:<n>" or "div:<n>" and re-encoded by the
// Convert* callbacks above.
// Fields: {name, immediate type, is-bit-flag, default, result converter}.
static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};
1706
// Heuristic: decide whether the operands parsed so far force the VOP3
// encoding — any modifiers present, an SGPR-pair destination, five or more
// operands, or a scalar-register src1.
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  // Operands[1] is the destination (Operands[0] is the mnemonic token).
  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  // Operands[3] is src1; a scalar register there implies VOP3.
  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}
1729
// Parse the VOP3 optional operands (clamp / mul / div), but only when the
// instruction can actually take the VOP3 encoding (modifiers already seen,
// VOP3-shaped operands, more input left, or a forced _e64 suffix).  When a
// modifier operand is added for the first time, earlier register/immediate
// operands get an explicit zero modifier so the operand list stays uniform.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}
1758
Tom Stellarda90b9522016-02-11 03:28:15 +00001759void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
1760 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00001761 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00001762 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00001763 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
1764 }
1765 for (unsigned E = Operands.size(); I != E; ++I)
1766 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
1767}
1768
1769void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001770 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1771 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00001772 cvtVOP3(Inst, Operands);
1773 } else {
1774 cvtId(Inst, Operands);
1775 }
1776}
1777
1778void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
1779 if (operandsHaveModifiers(Operands)) {
1780 cvtVOP3(Inst, Operands);
1781 } else {
1782 cvtId(Inst, Operands);
1783 }
1784}
1785
// Converter for opcodes that only exist in the VOP3 encoding.
void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}
1789
1790void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001791 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00001792 unsigned I = 1;
1793 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00001794 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00001795 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00001796 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001797
Tom Stellarda90b9522016-02-11 03:28:15 +00001798 for (unsigned E = Operands.size(); I != E; ++I) {
1799 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00001800 if (Op.isRegOrImmWithInputMods()) {
1801 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001802 } else if (Op.isImm()) {
1803 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00001804 } else {
1805 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001806 }
Tom Stellarda90b9522016-02-11 03:28:15 +00001807 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001808
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001809 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
1810 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001811}
1812
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001813void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001814 unsigned I = 1;
1815 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
1816 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
1817 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
1818 }
1819
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001820 OptionalImmIndexMap OptionalIdx;
1821
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001822 for (unsigned E = Operands.size(); I != E; ++I) {
1823 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001824
1825 // Add the register arguments
1826 if (Op.isRegOrImm()) {
1827 Op.addRegOrImmOperands(Inst, 1);
1828 continue;
1829 } else if (Op.isImmModifier()) {
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001830 OptionalIdx[Op.getImmTy()] = I;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001831 } else {
1832 assert(false);
1833 }
1834 }
1835
1836 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
1837 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
1838 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1839 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
1840 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
1841 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1842 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
1843 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1844}
1845
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001846void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
1847 unsigned I = 1;
1848 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
1849 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
1850 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
1851 }
1852
1853 // Add src, same as dst
1854 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
1855
1856 OptionalImmIndexMap OptionalIdx;
1857
1858 for (unsigned E = Operands.size(); I != E; ++I) {
1859 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
1860
1861 // Add the register arguments
1862 if (Op.isRegOrImm()) {
1863 Op.addRegOrImmOperands(Inst, 1);
1864 continue;
1865 } else if (Op.isImmModifier()) {
1866 OptionalIdx[Op.getImmTy()] = I;
1867 } else {
1868 assert(false);
1869 }
1870 }
1871
1872 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
1873 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
1874 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1875 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
1876 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
1877 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1878 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
1879 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1880}
1881
1882
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001883
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  // Register this asm parser with both AMDGPU target descriptors.
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
1889
1890#define GET_REGISTER_MATCHER
1891#define GET_MATCHER_IMPLEMENTATION
1892#include "AMDGPUGenAsmMatcher.inc"