//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <map>

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    // Only plain immediates are inlinable; named immediates such as the
    // "clamp" attribute are not.
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone)
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values, which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI.
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }
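
  // Example of the distinction above: in "v_add_f32 v0, 1.0, v1" the 1.0
  // can be encoded as an inline constant, while an arbitrary value such as
  // 0.1234 must instead be emitted as a separate 32-bit literal dword.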

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isClamp() const {
    return isImm() && Imm.Type == ImmTyClamp;
  }

  bool isOMod() const {
    return isImm() && Imm.Type == ImmTyOMod;
  }

  bool isMod() const {
    return isClamp() || isOMod();
  }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to
    // 64 bits. See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check whether the 64-bit value (coming from assembly source) can
    // be narrowed to 32 bits (in the instruction stream). That requires
    // knowledge of the instruction type (unsigned/signed, floating or
    // "untyped"/B64); see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      if (Imm.Type != AMDGPUOperand::ImmTyNone)
        OS << getImm();
      else
        OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                                    bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr,
                                                   SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }
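
  // The "_e32"/"_e64" mnemonic suffixes recognized in ParseInstruction() set
  // ForcedEncodingSize; e.g. "v_add_f32_e64" forces the 64-bit VOP3 encoding
  // even where the 32-bit form would otherwise match.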

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t &);
};

} // end anonymous namespace

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
      .Case("exec", AMDGPU::EXEC)
      .Case("vcc", AMDGPU::VCC)
      .Case("flat_scratch", AMDGPU::FLAT_SCR)
      .Case("m0", AMDGPU::M0)
      .Case("scc", AMDGPU::SCC)
      .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
      .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
      .Case("vcc_lo", AMDGPU::VCC_LO)
      .Case("vcc_hi", AMDGPU::VCC_HI)
      .Case("exec_lo", AMDGPU::EXEC_LO)
      .Case("exec_hi", AMDGPU::EXEC_HI)
      .Default(0);
}

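// Register operands are either one of the named special registers above, a
// single 32-bit register such as "v0" or "s17", or an aligned range such as
// "s[4:7]" naming a multi-dword tuple.
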
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs.
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register.
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register wider than 32 bits.

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If the 64-bit encoding has been forced, we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}
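
// Per the grammar in ParseDirectiveMajorMinor, the accepted form is e.g.:
//   .hsa_code_object_version 1,0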

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
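
// With explicit arguments the directive reads, e.g.:
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"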

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
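
// A minimal example of the block this parses:
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     wavefront_sgpr_count = 10
//   .end_amd_kernel_code_t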

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more
  // that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
          isForcedVOP3()));

      if (Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      ResTy = parseVOP3OptionalOps(Operands);
      if (ResTy == MatchOperand_NoMatch) {
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic.
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                              "failed parsing operand.");
    case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                            "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}
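
// e.g. with Prefix == "offset", the input "offset:16" sets Int to 16.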

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are at the end of the statement, this is a default argument and
  // the bit keeps its default value of 0.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
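
// e.g. for Name == "glc": "glc" yields 1, "noglc" yields 0, and an absent
// operand keeps the default of 0.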

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1);     // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0));                // m0
}
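
// e.g. "ds_write2_b32 v2, v4, v6 offset0:4 offset1:8" is converted here;
// offset0, offset1 and gds come from the parsed (or defaulted) optional
// operands collected above.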

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

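// A worked example of the encoding handled below: "s_waitcnt vmcnt(0) &
// lgkmcnt(0)" starts from the all-ones default 0xf7f, clears vmcnt [3:0]
// to 0 and lgkmcnt [11:8] to 0, and leaves expcnt [6:4] at its maximum,
// yielding 0x070.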
// Parse one "name(value)" counter specification and merge it into IntVal.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Consume an optional '&' or ',' separator between counter specifications.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default: with every field at its maximum,
  // the instruction waits for nothing.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

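// A branch target is either an absolute integer immediate or a label; the
// label form (e.g. "s_branch .LBB0_1") becomes a symbol reference that is
// resolved later by a fixup.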
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

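// Each table entry below is {asm name, operand type, single-bit flag,
// default value, optional result converter}; see the OptionalOperand
// struct earlier in this file.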
static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

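// For flat atomics, "glc" is hard-coded into the asm string of the variants
// that return the pre-op value, so it reaches us as a plain token rather
// than an optional immediate operand; cvtFlat must tolerate both forms.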
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the 'glc' token, which is sometimes hard-coded into the
    // asm string. There is no MCInst operand for it.
    if (Op.isToken())
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Flat atomic instructions don't have a glc argument.
  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  }

  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

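// The mubuf offset is an unsigned 12-bit byte offset, so the largest
// accepted value is "offset:4095".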
bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI, and we only want to use them
  // when the offset is > 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
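
// For example, an offset of 0x1234 fails isSMRDOffset but passes
// isSMRDLiteralOffset, so on CI it can still be encoded via the 32-bit
// literal form.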

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

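// The two converters above fold the output-modifier syntax into the 2-bit
// omod field: 0 = no modifier, 1 = multiply by 2, 2 = multiply by 4,
// 3 = divide by 2. ConvertOmodMul maps mul values 2 and 4 to 1 and 2 via
// the shift; ConvertOmodDiv maps a div value of 2 to 3 (1 is a no-op,
// encoded as 0).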
static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

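// Heuristic for choosing the VOP3 encoding when none was forced: explicit
// source modifiers always require VOP3; an SGPR-pair destination implies
// the VOP3 form of a comparison, since the VOPC encoding can only write
// VCC; and an SGPR in src1 implies VOP3, because the VOP2 encoding only
// allows an SGPR in src0.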
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operand, so we need to make sure all
      // previous register and immediate operands have modifiers too.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

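// MCInst conversion helpers. cvtId emits the defs as registers followed by
// the remaining operands unchanged; the cvtVOP3_2_* variants pick between
// that plain form and the full VOP3 form (with source modifiers, clamp and
// omod), depending on whether any modifiers were parsed or the 64-bit
// encoding was forced.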
void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands) || isForcedVOP3()) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

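// Build a full VOP3 MCInst: every source carries an input-modifier operand
// pair, and clamp/omod are appended even when absent from the source text
// (a default of 0 keeps the operand list at the fixed VOP3 shape).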
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  unsigned ClampIdx = 0, OModIdx = 0;
  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isClamp()) {
      ClampIdx = I;
    } else if (Op.isOMod()) {
      OModIdx = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  if (ClampIdx) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[ClampIdx]);
    Op.addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
  if (OModIdx) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[OModIdx]);
    Op.addImmOperands(Inst, 1);
  } else {
    Inst.addOperand(MCOperand::createImm(0));
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"