//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

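  // Note on the Modifiers encoding used below (see parseOperand): a leading
  // '-' on a source operand sets bit 0 (neg) and surrounding '|...|' sets
  // bit 1 (abs), so e.g. "-|v0|" carries Modifiers == 0x3.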
  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
      immediates are inlinable (e.g. "clamp" attribute is not) */ )
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isClamp() const {
    return isImmTy(ImmTyClamp);
  }

  bool isOMod() const {
    return isImmTy(ImmTyOMod);
  }

  bool isImmModifier() const {
    return Kind == Immediate && Imm.Type != ImmTyNone;
  }

  bool isDMask() const {
    return isImmTy(ImmTyDMask);
  }

  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }

  bool isMod() const {
    return isClamp() || isOMod();
  }

  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of instruction type (unsigned/signed, floating or "untyped"/B64),
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      if (Imm.Type != AMDGPUOperand::ImmTyNone)
        OS << getImm();
      else
        OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                            bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

}

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::VGPR_32RegClassID;
    case 2: return AMDGPU::VReg_64RegClassID;
    case 3: return AMDGPU::VReg_96RegClassID;
    case 4: return AMDGPU::VReg_128RegClassID;
    case 8: return AMDGPU::VReg_256RegClassID;
    case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
  default: return -1;
  case 1: return AMDGPU::SGPR_32RegClassID;
  case 2: return AMDGPU::SGPR_64RegClassID;
  case 4: return AMDGPU::SReg_128RegClassID;
  case 8: return AMDGPU::SReg_256RegClassID;
  case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

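// Register operands come in three forms: built-in names handled by
// getRegForName ("vcc", "exec", "m0", ...), single 32-bit registers such as
// "v0" or "s5", and ranges such as "s[2:3]" or "v[4:7]" for wider classes.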
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register greater than 32-bits.

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

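// A trailing "_e32"/"_e64" on the mnemonic (e.g. "v_add_f32_e64") forces a
// 32- or 64-bit encoding (see ParseInstruction); reject any match whose VOP3
// flag disagrees with the forced size.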
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

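// Parses the "major, minor" integer pair shared by the version directives,
// e.g. the "1,0" in ".hsa_code_object_version 1,0" (values illustrative).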
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

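// With no arguments this directive emits the ISA version of the targeted GPU;
// otherwise it expects "major, minor, stepping, vendor, arch", e.g. a line
// like .hsa_code_object_isa 7,0,0,"AMD","AMDGPU" (version numbers illustrative).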
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

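// Reads an .amd_kernel_code_t block: each line inside the block is a
// "key = value" pair handled by ParseAMDKernelCodeTValue, e.g. (illustrative)
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     workitem_vgpr_count = 32
//   .end_amd_kernel_code_t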
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more
  // that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    // For an immediate, a leading '-' is folded into the value rather than
    // recorded as a source modifier.
    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
          isForcedVOP3()));

      if (Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      ResTy = parseVOP3OptionalOps(Operands);
      if (ResTy == MatchOperand_NoMatch) {
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

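// Parses a "prefix:value" operand such as "offset:16" or "offset0:4" into an
// integer; returns NoMatch when the next identifier is not the given prefix.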
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

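// Parses a named bit flag: the bare name sets the bit (e.g. "glc" -> 1) and a
// "no"-prefixed form clears it ("noglc" -> 0); anything else is NoMatch.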
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

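// If an optional operand of type ImmT was parsed (its index is recorded in
// OptionalIdx), re-emit it into Inst; otherwise emit the default value 0.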
1364void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands, OptionalImmIndexMap& OptionalIdx, enum AMDGPUOperand::ImmTy ImmT) {
1365 auto i = OptionalIdx.find(ImmT);
1366 if (i != OptionalIdx.end()) {
1367 unsigned Idx = i->second;
1368 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1369 } else {
1370 Inst.addOperand(MCOperand::createImm(0));
1371 }
1372}
1373
Tom Stellard45bb48e2015-06-13 03:28:10 +00001374static bool operandsHasOptionalOp(const OperandVector &Operands,
1375 const OptionalOperand &OOp) {
1376 for (unsigned i = 0; i < Operands.size(); i++) {
1377 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1378 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1379 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1380 return true;
1381
1382 }
1383 return false;
1384}
1385
1386AMDGPUAsmParser::OperandMatchResultTy
1387AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
1388 OperandVector &Operands) {
1389 SMLoc S = Parser.getTok().getLoc();
1390 for (const OptionalOperand &Op : OptionalOps) {
1391 if (operandsHasOptionalOp(Operands, Op))
1392 continue;
1393 AMDGPUAsmParser::OperandMatchResultTy Res;
1394 int64_t Value;
1395 if (Op.IsBit) {
1396 Res = parseNamedBit(Op.Name, Operands, Op.Type);
1397 if (Res == MatchOperand_NoMatch)
1398 continue;
1399 return Res;
1400 }
1401
1402 Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
1403
1404 if (Res == MatchOperand_NoMatch)
1405 continue;
1406
1407 if (Res != MatchOperand_Success)
1408 return Res;
1409
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001410 bool DefaultValue = (Value == Op.Default);
1411
Tom Stellard45bb48e2015-06-13 03:28:10 +00001412 if (Op.ConvertResult && !Op.ConvertResult(Value)) {
1413 return MatchOperand_ParseFail;
1414 }
1415
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001416 if (!DefaultValue) {
1417 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
1418 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001419 return MatchOperand_Success;
1420 }
1421 return MatchOperand_NoMatch;
1422}
1423
1424//===----------------------------------------------------------------------===//
1425// ds
1426//===----------------------------------------------------------------------===//
1427
1428static const OptionalOperand DSOptionalOps [] = {
1429 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
1430 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
1431};
1432
1433static const OptionalOperand DSOptionalOpsOff01 [] = {
1434 {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
1435 {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
1436 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
1437};
1438
1439AMDGPUAsmParser::OperandMatchResultTy
1440AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
1441 return parseOptionalOps(DSOptionalOps, Operands);
1442}
1443AMDGPUAsmParser::OperandMatchResultTy
1444AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
1445 return parseOptionalOps(DSOptionalOpsOff01, Operands);
1446}
1447
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
    parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                       AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

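    // GDS-only forms hard-code "gds" in the asm string; here it is a token
    // with no MCInst operand of its own, so just note that we saw it.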
    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);

  if (!GDSOnly) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

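// s_waitcnt accepts either a raw immediate or named counters, e.g.
// "s_waitcnt vmcnt(0) & lgkmcnt(0)" (illustrative); parseCnt folds one
// name(value) pair into the packed immediate.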
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
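  // 0xf7f == lgkmcnt(0xf) << 8 | expcnt(0x7) << 4 | vmcnt(0xf): every field
  // at its maximum, i.e. wait on nothing.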
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

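// A branch target is either an absolute integer expression or a symbol
// reference, e.g. "s_branch BB0_1" (illustrative label name).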
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
        Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

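// e.g. "flat_load_dword v1, v[3:4] glc slc tfe" (illustrative). The atomic
// table omits glc: for atomics it is baked into the asm string of the
// returning variants and is parsed as a token instead (see cvtFlatAtomic).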
static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the 'glc' token for flat atomics; it has no MCInst operand.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

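// Illustrative shape: "buffer_load_dword v1, s[4:7], s1 offset:4 glc slc tfe";
// the offset immediate is limited to 12 bits (see isMubufOffset).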
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

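// SI/CI encode the SMRD offset as an 8-bit dword offset; CI can also take a
// 32-bit literal for anything wider (see isSMRDLiteralOffset). VI's 20-bit
// form is not handled yet, per the FIXME below.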
bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

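// The VOP3 output-modifier (omod) field encodes: 0 = none, 1 = mul:2,
// 2 = mul:4, 3 = div:2. The converters below rewrite a parsed mul/div factor
// into that field value.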
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

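// Heuristic: decide whether an operand list can only be the VOP3 encoding,
// i.e. it carries explicit modifiers, an SGPR destination or SGPR src1, or
// more operands than the VOP1/VOP2 forms allow.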
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

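// Default conversion: emit defs as register operands, then everything else as
// register-or-immediate operands in source order.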
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
}

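// e.g. "image_load v[0:3], v[4:7], s[8:15] dmask:0xf unorm glc" (illustrative).
// Trailing modifiers are collected by immediate type in the loop below, then
// re-emitted in the fixed order the instruction definition expects.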
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register or immediate arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register or immediate arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      assert(false);
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"