//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    // Only plain immediates are inlinable (e.g. the "clamp" attribute is not).
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone)
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values, which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isClamp() const {
    return isImmTy(ImmTyClamp);
  }

  bool isOMod() const {
    return isImmTy(ImmTyOMod);
  }

  bool isImmModifier() const {
    return Kind == Immediate && Imm.Type != ImmTyNone;
  }

  bool isDMask() const {
    return isImmTy(ImmTyDMask);
  }

  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }

  bool isMod() const {
    return isClamp() || isOMod();
  }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of the instruction type (unsigned/signed, floating or "untyped"/B64);
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }
294
295 bool isMem() const override {
296 return false;
297 }
298
299 bool isExpr() const {
300 return Kind == Expression;
301 }
302
303 bool isSoppBrTarget() const {
304 return isExpr() || isImm();
305 }
306
307 SMLoc getStartLoc() const override {
308 return StartLoc;
309 }
310
311 SMLoc getEndLoc() const override {
312 return EndLoc;
313 }
314
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000315 void print(raw_ostream &OS) const override {
316 switch (Kind) {
317 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000318 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000319 break;
320 case Immediate:
Tom Stellardd93a34f2016-02-22 19:17:56 +0000321 if (Imm.Type != AMDGPUOperand::ImmTyNone)
322 OS << getImm();
323 else
324 OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000325 break;
326 case Token:
327 OS << '\'' << getToken() << '\'';
328 break;
329 case Expression:
330 OS << "<expr " << *Expr << '>';
331 break;
332 }
333 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000334
335 static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
336 enum ImmTy Type = ImmTyNone,
337 bool IsFPImm = false) {
338 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
339 Op->Imm.Val = Val;
340 Op->Imm.IsFPImm = IsFPImm;
341 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000342 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000343 Op->StartLoc = Loc;
344 Op->EndLoc = Loc;
345 return Op;
346 }
347
348 static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
349 bool HasExplicitEncodingSize = true) {
350 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
351 Res->Tok.Data = Str.data();
352 Res->Tok.Length = Str.size();
353 Res->StartLoc = Loc;
354 Res->EndLoc = Loc;
355 return Res;
356 }
357
358 static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
359 SMLoc E,
360 const MCRegisterInfo *TRI,
Tom Stellard2b65ed32015-12-21 18:44:27 +0000361 const MCSubtargetInfo *STI,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000362 bool ForceVOP3) {
363 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
364 Op->Reg.RegNo = RegNo;
365 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000366 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000367 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000368 Op->Reg.IsForcedVOP3 = ForceVOP3;
369 Op->StartLoc = S;
370 Op->EndLoc = E;
371 return Op;
372 }
373
374 static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
375 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
376 Op->Expr = Expr;
377 Op->StartLoc = S;
378 Op->EndLoc = S;
379 return Op;
380 }
381
382 bool isDSOffset() const;
383 bool isDSOffset01() const;
384 bool isSWaitCnt() const;
385 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000386 bool isSMRDOffset() const;
387 bool isSMRDLiteralOffset() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000388};
389
390class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000391 const MCInstrInfo &MII;
392 MCAsmParser &Parser;
393
394 unsigned ForcedEncodingSize;
Matt Arsenault68802d32015-11-05 03:11:27 +0000395
Matt Arsenault3b159672015-12-01 20:31:08 +0000396 bool isSI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000397 return AMDGPU::isSI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000398 }
399
400 bool isCI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000401 return AMDGPU::isCI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000402 }
403
Matt Arsenault68802d32015-11-05 03:11:27 +0000404 bool isVI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000405 return AMDGPU::isVI(getSTI());
Matt Arsenault68802d32015-11-05 03:11:27 +0000406 }
407
408 bool hasSGPR102_SGPR103() const {
409 return !isVI();
410 }
411
Tom Stellard45bb48e2015-06-13 03:28:10 +0000412 /// @name Auto-generated Match Functions
413 /// {
414
415#define GET_ASSEMBLER_HEADER
416#include "AMDGPUGenAsmMatcher.inc"
417
418 /// }
419
Tom Stellard347ac792015-06-26 21:15:07 +0000420private:
421 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
422 bool ParseDirectiveHSACodeObjectVersion();
423 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000424 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
425 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000426 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000427 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000428 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000429 bool ParseDirectiveAMDGPUHsaModuleGlobal();
430 bool ParseDirectiveAMDGPUHsaProgramGlobal();
431 bool ParseSectionDirectiveHSADataGlobalAgent();
432 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000433 bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                          AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                     AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

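// Registers are written either as one of the named special registers matched
// above (e.g. "vcc", "exec_lo", "flat_scratch"), as a single 32-bit register
// such as "v0" or "s5", or as a range like "s[2:3]" or "v[4:7]" for wider
// tuples; the examples are illustrative of the syntax accepted below.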
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
615
616 int64_t RegLo, RegHi;
617 Parser.Lex();
618 if (getLexer().isNot(AsmToken::LBrac))
619 return true;
620
621 Parser.Lex();
622 if (getParser().parseAbsoluteExpression(RegLo))
623 return true;
624
625 if (getLexer().isNot(AsmToken::Colon))
626 return true;
627
628 Parser.Lex();
629 if (getParser().parseAbsoluteExpression(RegHi))
630 return true;
631
632 if (getLexer().isNot(AsmToken::RBrac))
633 return true;
634
635 Parser.Lex();
636 RegWidth = (RegHi - RegLo) + 1;
637 if (IsVgpr) {
638 // VGPR registers aren't aligned.
639 RegIndexInClass = RegLo;
640 } else {
641 // SGPR registers are aligned. Max alignment is 4 dwords.
Matt Arsenault967c2f52015-11-03 22:50:32 +0000642 unsigned Size = std::min(RegWidth, 4u);
643 if (RegLo % Size != 0)
644 return true;
645
646 RegIndexInClass = RegLo / Size;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000647 }
648 }
649
Matt Arsenault967c2f52015-11-03 22:50:32 +0000650 int RCID = getRegClass(IsVgpr, RegWidth);
651 if (RCID == -1)
652 return true;
653
654 const MCRegisterClass RC = TRI->getRegClass(RCID);
Matt Arsenault3473c722015-11-03 22:50:27 +0000655 if (RegIndexInClass >= RC.getNumRegs())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000656 return true;
Matt Arsenault3473c722015-11-03 22:50:27 +0000657
658 RegNo = RC.getRegister(RegIndexInClass);
Matt Arsenault68802d32015-11-05 03:11:27 +0000659 return !subtargetHasRegister(*TRI, RegNo);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000660}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

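// A version directive is written as e.g. ".hsa_code_object_version 1,0"; the
// major/minor pair is parsed by ParseDirectiveMajorMinor above and forwarded
// to the target streamer (the numbers shown are only an example).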
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

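// Parses a block of the form (field names and values here are illustrative
// only):
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     wavefront_sgpr_count = 10
//   .end_amd_kernel_code_t
// Each "name = integer" line is dispatched to ParseAMDKernelCodeTValue.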
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more
  // that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

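// Parse a single operand. Besides plain integers, floats, registers, and
// tokens, this handles the VOP source-modifier syntax: a leading '-' sets
// the negate modifier (bit 0x1) and enclosing pipes, e.g. "-|v0|", set the
// abs modifier (bit 0x2) on the parsed register.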
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure of the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
          isForcedVOP3()));

      if (Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      ResTy = parseVOP3OptionalOps(Operands);
      if (ResTy == MatchOperand_NoMatch) {
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

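// Parse an integer operand spelled "prefix:value", e.g. "offset:16" or
// "offset0:4" (the prefixes shown are examples taken from the optional
// operand tables below); if the statement has already ended, the
// caller-supplied default is used instead.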
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

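// Parse a named-bit operand: the bare name (e.g. "gds" or "glc") sets the
// bit, while the "no"-prefixed spelling (e.g. "nogds") explicitly clears it.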
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

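// Appends one optional immediate to Inst: the parsed value if the operand
// appeared in the source, otherwise a default of 0.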
void addOptionalImmOperand(MCInst &Inst, const OperandVector &Operands,
                           OptionalImmIndexMap &OptionalIdx,
                           enum AMDGPUOperand::ImmTy ImmT) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

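// Converts operands for two-offset DS instructions, e.g.
//   ds_read2_b32 v[0:1], v2 offset0:4 offset1:8
// (an illustrative example): registers are appended in order, then offset0,
// offset1, and gds are filled in from whatever optional operands were parsed.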
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

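// Converts operands for single-offset DS instructions. When the "gds" token
// is part of the mnemonic's required syntax it is consumed below as a plain
// token, so no separate gds immediate is encoded for it.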
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001511 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1512 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001513
1514 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001515 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001516 }
1517 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1518}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
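
// Worked example (editorial sketch, not from the original source): parsing
// "vmcnt(0)" against the all-disabled default 0xf7f clears bits [3:0] and
// ORs in the new count:
//   0xf7f & ~(0xf << 0) == 0xf70;  0xf70 | (0 << 0) == 0xf70
// so the wait on vmcnt is armed while expcnt/lgkmcnt stay at their
// "don't wait" maximum values.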

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt   [3:0]
  // expcnt  [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}
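
// Example inputs (illustrative, not from the original source):
//   s_branch 4       ; integer offset, parsed as an absolute immediate
//   s_branch BB0_1   ; label, parsed as a symbol-reference expression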

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}
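
// Example syntax (illustrative, not from the original source):
//   flat_load_dword v0, v[2:3] glc slc tfe
//   flat_atomic_add v0, v[2:3], v4 glc slc   ; here "glc" is hard-coded into
//                                            ; the asm string, so only slc
//                                            ; and tfe are parsed as options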

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}
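
// Example syntax (illustrative, not from the original source); the MUBUF
// immediate offset field is 12 bits wide, hence the isUInt<12> check above:
//   buffer_load_dword v0, v1, s[4:7], s8 offen offset:16 glc slc tfe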

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
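
// Example syntax (illustrative, not from the original source):
//   image_load v[0:3], v[4:7], s[8:15] dmask:0xf unorm glc da r128 lwe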

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI, and we only want to use them
  // when the offset does not fit in 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
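
// Editorial sketch (not from the original source): an offset that fits in
// 8 bits, e.g. "s_load_dword s0, s[2:3], 0x10", matches isSMRDOffset, while
// a wider constant such as 0x1234 falls through to isSMRDLiteralOffset and
// is emitted as a CI-only 32-bit literal.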

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}
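
// Editorial note (not from the original source): these helpers map the
// asm-level "mul:"/"div:" values onto the 2-bit hardware omod field, where
// 0 = no modifier, 1 = multiply by 2, 2 = multiply by 4, and 3 = divide by
// 2; e.g. "mul:4" becomes 2 and "div:2" becomes 3.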

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by operandsHaveModifiers may change after parsing
  // an operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  unsigned ClampIdx = 0, OModIdx = 0;
  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isClamp()) {
      ClampIdx = I;
    } else if (Op.isOMod()) {
      OModIdx = I;
    } else {
      assert(false);
    }
  }

  if (ClampIdx) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
    Op.addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
  if (OModIdx) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
    Op.addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
}
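
// Illustrative trace (not from the original source): for
//   v_add_f32 v0, -v1, |v2| clamp mul:2
// the defs loop adds v0, each source is appended as a (modifiers, value)
// pair by addRegOrImmWithInputModsOperands, and the trailing code appends
// clamp=1 and omod=1 (from "mul:2"), defaulting either field to 0 when the
// corresponding modifier is absent.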

void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = i;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"