//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

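  // Inline constants on SI/VI cover small integers and a fixed set of
  // floats; the check below mirrors that encoding: integers in [-16, 64]
  // plus 0.0, +/-0.5, +/-1.0, +/-2.0 and +/-4.0. For example,
  // "v_add_f32 v0, 1.0, v1" can encode 1.0 inline, while an arbitrary
  // literal such as 1.5 cannot.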
  bool isInlinableImm() const {
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
      immediates are inlinable (e.g. "clamp" attribute is not) */ )
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isClamp() const {
    return isImm() && Imm.Type == ImmTyClamp;
  }

  bool isOMod() const {
    return isImm() && Imm.Type == ImmTyOMod;
  }

  bool isMod() const {
    return isClamp() || isOMod();
  }

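  // Modifier bits as produced by parseOperand(): bit 0 (0x1) marks source
  // negation ('-') and bit 1 (0x2) marks absolute value ('|...|').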
  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of the instruction type (unsigned/signed, floating or "untyped"/B64),
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      if (Imm.Type != AMDGPUOperand::ImmTyNone)
        OS << getImm();
      else
        OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                               bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                          AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                     AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                  const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

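// Describes one optional operand: its name (e.g. "offset" in "offset:16",
// or a bare bit flag like "gds"), the immediate type it produces, whether
// it is a bit flag, its default value, and an optional hook to convert the
// parsed value.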
struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}
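
// For example, getRegClass(true, 2) yields AMDGPU::VReg_64RegClassID, the
// class backing a VGPR pair written as v[2:3]; widths with no matching
// class (e.g. 5) return -1.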

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

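// Registers may be written as named special registers ("vcc", "m0", "exec",
// ...), as single 32-bit registers ("s7", "v255"), or as ranges such as
// s[0:3] or v[4:5]; a range must map to a legal class width and, for SGPRs,
// satisfy the alignment checked below.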
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a single 32-bit register, e.g. "v0" or "s17".
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register range wider than 32 bits, e.g. "s[0:3]".
    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

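// The directives below carry HSA code-object metadata, e.g.
//   .hsa_code_object_version 1,0
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// (the numeric values here are illustrative).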
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {

  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {

  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

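// Rejects registers that do not exist on the selected subtarget: SI has no
// flat_scratch registers, and VI exposes two fewer SGPRs than SI/CI, so
// sgpr102/sgpr103 are unavailable there.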
bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {

  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
          isForcedVOP3()));

      if (Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      ResTy = parseVOP3OptionalOps(Operands);
      if (ResTy == MatchOperand_NoMatch) {
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                              "failed parsing operand.");
    case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                            "not a valid operand.");
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

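// Parses an integer operand written with a named prefix, e.g. "offset:16"
// or "offset0:4"; the prefix must match exactly and be followed by a colon
// and an integer.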
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {

  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

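// Parses a named bit flag: the bare name (e.g. "gds") sets the bit to 1,
// its "no"-prefixed form (e.g. "nogds") sets it to 0, and anything else is
// no match.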
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

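// Maps each optional-immediate type to the index of the parsed operand that
// supplied it; addOptionalImmOperand() then emits either that operand's
// value or a default of 0 when the operand was omitted in the source.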
typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

void addOptionalImmOperand(MCInst &Inst, const OperandVector &Operands,
                           OptionalImmIndexMap &OptionalIdx,
                           enum AMDGPUOperand::ImmTy ImmT) {
  auto i = OptionalIdx.find(ImmT);
  if (i != OptionalIdx.end()) {
    unsigned Idx = i->second;
    ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
    parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

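// Converts parsed DS operands for the two-offset forms, e.g.
// "ds_write2_b32 v1, v2, v3 offset0:4 offset1:8" (illustrative syntax);
// omitted offsets and the gds flag default to 0 via addOptionalImmOperand().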
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);

  if (!GDSOnly) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

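// s_waitcnt packs three counters into one immediate: vmcnt in bits [3:0],
// expcnt in bits [6:4] and lgkmcnt in bits [11:8]. Counters start disabled
// at 0xf7f, and each named counter overwrites only its own field, so e.g.
// "s_waitcnt vmcnt(0) lgkmcnt(0)" yields 0x070 (expcnt still 7).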
1499bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1500 StringRef CntName = Parser.getTok().getString();
1501 int64_t CntVal;
1502
1503 Parser.Lex();
1504 if (getLexer().isNot(AsmToken::LParen))
1505 return true;
1506
1507 Parser.Lex();
1508 if (getLexer().isNot(AsmToken::Integer))
1509 return true;
1510
1511 if (getParser().parseAbsoluteExpression(CntVal))
1512 return true;
1513
1514 if (getLexer().isNot(AsmToken::RParen))
1515 return true;
1516
1517 Parser.Lex();
1518 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1519 Parser.Lex();
1520
1521 int CntShift;
1522 int CntMask;
1523
1524 if (CntName == "vmcnt") {
1525 CntMask = 0xf;
1526 CntShift = 0;
1527 } else if (CntName == "expcnt") {
1528 CntMask = 0x7;
1529 CntShift = 4;
1530 } else if (CntName == "lgkmcnt") {
Tom Stellard3d2c8522016-01-28 17:13:44 +00001531 CntMask = 0xf;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001532 CntShift = 8;
1533 } else {
1534 return true;
1535 }
1536
1537 IntVal &= ~(CntMask << CntShift);
1538 IntVal |= (CntVal << CntShift);
1539 return false;
1540}
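
// Worked example (illustrative): parsing "lgkmcnt(0)" against the default
// value 0xf7f clears bits [11:8] (IntVal &= ~(0xf << 8)) and ORs in the new
// count (IntVal |= 0 << 8), yielding 0x07f: the LGKM counter is now waited to
// zero while vmcnt/expcnt remain disabled.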

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
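  // (Derivation of the default: 0xf7f == (0xf << 8) | (0x7 << 4) | 0xf, i.e.
  // every counter field saturated, so s_waitcnt waits on nothing.)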
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
        Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
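
// Example (illustrative): "flat_load_dword v0, v[1:2] glc slc" uses the
// optional single-bit operands above, each defaulting to 0 when absent. The
// atomic table omits "glc" because the returning atomic variants hard-code it
// in the asm string and handle it as a token (see cvtFlatAtomic below).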

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the hard-coded 'glc' token for flat atomics; it has no MCInst
    // operand.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}
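
// Example (illustrative; the exact mnemonic form is an assumption): the MUBUF
// offset field is a 12-bit unsigned byte offset, so something like
// "buffer_load_dword v0, s[0:3], s4 offset:4095" matches, while offset:4096
// fails isUInt<12> and is rejected.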

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset does not fit in 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
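
// Example (illustrative): an offset of 0x1ff fails isUInt<8> and therefore
// matches isSMRDLiteralOffset rather than isSMRDOffset; on CI it can then be
// encoded as a 32-bit literal following the instruction.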

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}
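
// Worked example (illustrative): "mul:2" arrives here as Mul == 2 and the
// shift maps it to the omod field encoding 1 (mul:1 -> 0, mul:2 -> 1,
// mul:4 -> 2).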

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}
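
// Worked example (illustrative): "div:2" maps to the omod encoding 3
// (divide by two); "div:1" is the identity and maps to 0, the same encoding
// as having no output modifier at all.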

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}
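
// Example (illustrative): "v_add_f32 v0, -v1, v2" carries a source modifier,
// so operandsHaveModifiers() forces the 64-bit VOP3 encoding, whereas a plain
// "v_add_f32 v0, v1, v2" can stay in the 32-bit VOP2 encoding.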

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by operandsHaveModifiers() may change after parsing an
  // operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operand, so we need to make sure all
      // previous register and immediate operands have modifiers too.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  unsigned ClampIdx = 0, OModIdx = 0;
  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isClamp()) {
      ClampIdx = I;
    } else if (Op.isOMod()) {
      OModIdx = I;
    } else {
      assert(false);
    }
  }

  if (ClampIdx) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
    Op.addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
  if (OModIdx) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
    Op.addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
}
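
// Note (descriptive, derived from the loop above): each source parsed with
// input modifiers contributes two MCInst operands (the modifier bits, then
// the register or immediate), and the clamp/omod operands are always emitted,
// defaulting to 0 when they were not written in the source.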

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"