//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "AMDKernelCodeT.h"
#include "SIDefines.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isReg())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(
        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
    addRegOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

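  // An inline immediate is a small operand value the hardware encodes
  // directly in the instruction word, so it needs no extra literal dword.
  // On SI this covers integers in [-16, 64] and the float constants
  // 0.0, +/-0.5, +/-1.0, +/-2.0 and +/-4.0, e.g. "v_add_f32 v0, -4.0, v1".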
  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;
  }

  bool isRegWithInputMods() const {
    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg());
    Reg.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind());
    return Reg.Modifiers != -1;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSCSrc64() const {
    return (isReg() && isRegClass(AMDGPU::SReg_64RegClassID)) || isInlineImm();
  }

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      OS << getImm();
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                                    bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.Modifiers = -1;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return STI->getFeatureBits()[AMDGPU::FeatureSouthernIslands];
  }

  bool isCI() const {
    return STI->getFeatureBits()[AMDGPU::FeatureSeaIslands];
  }

  bool isVI() const {
    return getSTI().getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);


  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

}

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::VGPR_32RegClassID;
    case 2: return AMDGPU::VReg_64RegClassID;
    case 3: return AMDGPU::VReg_96RegClassID;
    case 4: return AMDGPU::VReg_128RegClassID;
    case 8: return AMDGPU::VReg_256RegClassID;
    case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
  default: return -1;
  case 1: return AMDGPU::SGPR_32RegClassID;
  case 2: return AMDGPU::SGPR_64RegClassID;
  case 4: return AMDGPU::SReg_128RegClassID;
  case 8: return AMDGPU::SReg_256RegClassID;
  case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {

  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

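// Register operands come either as a named special register ("vcc",
// "flat_scratch", "exec_lo", ...), a single sgpr/vgpr such as "s7" or "v0",
// or a range such as "s[0:1]" or "v[4:7]" for 64-bit and wider operands.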
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return !subtargetHasRegister(*TRI, RegNo);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register greater than 32-bits.

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

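// An explicit "_e32" or "_e64" mnemonic suffix forces the 32-bit or 64-bit
// (VOP3) encoding, e.g. "v_add_f32_e64 v0, v1, v2"; reject a match whose
// encoding contradicts the requested size.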
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}


bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                               AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                               AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }

        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {

  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

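// Accepts either no arguments, in which case the targeted GPU's own ISA
// version is emitted, or the full form, e.g.:
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"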
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

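// Handles one "key = integer" line from an .amd_kernel_code_t block, e.g.
// (values illustrative only):
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     workitem_vgpr_count = 16
//   .end_amd_kernel_code_t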
bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {

  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
  // SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {

  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

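  // VOP source modifiers: a leading '-' negates a source operand and
  // enclosing it in '|' pipes takes its absolute value, e.g.
  // "v_add_f32 v0, -v1, |v2|".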
  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {

      bool HasModifiers = operandsHaveModifiers(Operands);
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      if (Modifiers && !HasModifiers) {
        // We are adding a modifier to src1 or src2 and previous sources
        // don't have modifiers, so we need to go back and empty modifiers
        // for each previous source.
        for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
             --PrevRegIdx) {

          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
          RegOp.setModifiers(0);
        }
      }


      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(),
          isForcedVOP3()));

      if (HasModifiers || Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);

      }
    } else {
      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                    S));
      Parser.Lex();
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                              "failed parsing operand.");
    case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                            "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {

  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

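// Named bits come in set/unset pairs: writing the bare name ("glc") sets the
// bit, and the "no"-prefixed form ("noglc") explicitly clears it.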
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;

  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

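// DS instructions take an optional offset (or an offset0/offset1 pair for the
// two-slot forms) plus a "gds" bit, e.g.:
//   ds_read_b32 v0, v2 offset:16
//   ds_read2_b32 v[0:1], v2 offset0:4 offset1:8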
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                       AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}


//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

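// The s_waitcnt operand packs three counters into one immediate: vmcnt in
// bits [3:0], expcnt in [6:4] and lgkmcnt in [10:8]. Counters not named stay
// at their "no wait" maximum, so for example
//   s_waitcnt vmcnt(0) & lgkmcnt(0)
// leaves expcnt at 7 and encodes as 0x70.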
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0x7;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [10:8]
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token which is sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken())
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;

  }

  // flat atomic instructions don't have a glc argument.
  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  }

  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

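// MUBUF instructions take an optional 12-bit unsigned offset plus glc/slc/tfe
// cache-control bits, e.g. "buffer_load_dword v1, s[4:7], s1 offset:4 glc".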
1641static const OptionalOperand MubufOptionalOps [] = {
1642 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
1643 {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
1644 {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
1645 {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
1646};
1647
1648AMDGPUAsmParser::OperandMatchResultTy
1649AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
1650 return parseOptionalOps(MubufOptionalOps, Operands);
1651}
1652
1653AMDGPUAsmParser::OperandMatchResultTy
1654AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
1655 return parseIntWithPrefix("offset", Operands);
1656}
1657
1658AMDGPUAsmParser::OperandMatchResultTy
1659AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
1660 return parseNamedBit("glc", Operands);
1661}
1662
1663AMDGPUAsmParser::OperandMatchResultTy
1664AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
1665 return parseNamedBit("slc", Operands);
1666}
1667
1668AMDGPUAsmParser::OperandMatchResultTy
1669AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
1670 return parseNamedBit("tfe", Operands);
1671}
1672
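// The MUBUF offset field is a 12-bit unsigned immediate, so reject anything
// wider.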
bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}

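// Convert parsed mubuf operands into MCInst operands. As an illustrative
// example, "buffer_load_dword v1, v2, s[4:7], 0 offen offset:16 glc" mixes
// register operands, a hard-coded 'offen' token, an immediate soffset, and
// optional immediates, each of which takes a different branch below.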
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen', which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI, and we only want to use them
  // when the offset is wider than 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

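// VOP3 output modifiers (omod) occupy a 2-bit field: 0 = none, 1 = multiply
// by 2.0, 2 = multiply by 4.0, 3 = divide by 2.0 (per our reading of the ISA
// docs). The converters below rewrite the parsed "mul:N" / "div:N" values
// into that encoding.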
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

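// Heuristic used when no encoding is forced: source modifiers always require
// the 64-bit VOP3 encoding, a 64-bit SGPR destination suggests a carry-out
// (VOP3b), three or more sources do not fit VOP1/VOP2, and an SGPR in src1
// is only encodable in VOP3, since VOP2 reads src1 from a VGPR.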
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}
1808
1809AMDGPUAsmParser::OperandMatchResultTy
1810AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
1811
1812 // The value returned by this function may change after parsing
1813 // an operand so store the original value here.
1814 bool HasModifiers = operandsHaveModifiers(Operands);
1815
1816 bool IsVOP3 = isVOP3(Operands);
1817 if (HasModifiers || IsVOP3 ||
1818 getLexer().isNot(AsmToken::EndOfStatement) ||
1819 getForcedEncodingSize() == 64) {
1820
1821 AMDGPUAsmParser::OperandMatchResultTy Res =
1822 parseOptionalOps(VOP3OptionalOps, Operands);
1823
1824 if (!HasModifiers && Res == MatchOperand_Success) {
1825 // We have added a modifier operation, so we need to make sure all
1826 // previous register operands have modifiers
1827 for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
1828 AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
1829 if (Op.isReg())
1830 Op.setModifiers(0);
1831 }
1832 }
1833 return Res;
1834 }
1835 return MatchOperand_NoMatch;
1836}
1837
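// Convert parsed VOP3 operands into MCInst operands. When modifiers are
// present, each source is emitted as a {modifier-imm, register} pair (see
// addRegWithInputModsOperands); e.g., for the illustrative "v_fma_f32
// v0, -v1, |v2|, v3 clamp", the neg/abs bits ride alongside the registers
// and the trailing clamp/omod immediates come last.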
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {

  unsigned i = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  if (Desc.getNumDefs() > 0) {
    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
  }

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"
