//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "SIDefines.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isReg())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(
        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
    addRegOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
  }
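
  // A quick sketch of what the check above accepts (examples, not an
  // exhaustive list): integers in [-16, 64] such as "64" or "-16", and the
  // bit patterns of +/-0.5, +/-1.0, +/-2.0 and +/-4.0, e.g. 0x3f800000
  // (1.0f). Anything else, such as 65 or 0.3, has to be carried as a 32-bit
  // literal rather than an inline constant.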

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;
  }

  bool isRegWithInputMods() const {
    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg());
    Reg.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind());
    return Reg.Modifiers != -1;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override { }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                          bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.Modifiers = -1;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr,
                                                   SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();

public:
  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    if (STI.getFeatureBits().none()) {
      // Set default features.
      STI.ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                 const ArrayRef<OptionalOperand> &OptionalOps,
                                 OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: llvm_unreachable("Unknown register width");
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: llvm_unreachable("Unknown register width");
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scr", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return false;
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a single 32-bit register, e.g. v0 or s17.
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register range wider than 32 bits, e.g. s[0:3].
    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      RegIndexInClass = RegLo / std::min(RegWidth, 4u);
    }
  }

  const MCRegisterInfo *TRC = getContext().getRegisterInfo();
  unsigned RC = getRegClass(IsVgpr, RegWidth);
  // Indices into the register class are zero-based, so an index equal to the
  // number of registers in the class is already out of range.
  if (RegIndexInClass >= TRC->getRegClass(RC).getNumRegs())
    return true;
  RegNo = TRC->getRegClass(RC).getRegister(RegIndexInClass);
  return false;
}
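
// A worked example of the range parsing above (hypothetical inputs): "s[4:7]"
// gives RegLo = 4 and RegHi = 7, so RegWidth = 4 and, because SGPR tuples are
// aligned, RegIndexInClass = 4 / min(4, 4) = 1, the second register in
// SReg_128. For "v[1:2]", VGPR tuples are unaligned, so RegIndexInClass is
// simply RegLo = 1 in VReg_64.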

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  return Match_Success;
}
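
// For instance (hypothetical input): "v_add_f32_e64 v0, v1, v2" forces the
// 64-bit encoding, so candidate opcodes without the VOP3 flag are rejected
// here; conversely an "_e32" suffix rejects VOP3 candidates. With no suffix,
// either encoding may match.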

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                               AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                               AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(STI.getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
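
// The directive syntax these parsers accept looks like (version numbers
// illustrative):
//   .hsa_code_object_version 1,0
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// The second directive may also be written with no arguments, in which case
// the ISA version of the targeted GPU is emitted instead.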

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  MCAsmParser &Parser = getParser();
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    APInt IntVal32(32, IntVal);
    if (IntVal32.getSExtValue() != IntVal) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    IntVal = IntVal32.getSExtValue();
    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      bool HasModifiers = operandsHaveModifiers(Operands);
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      if (Modifiers && !HasModifiers) {
        // We are adding a modifier to src1 or src2 and previous sources
        // don't have modifiers, so we need to go back and add empty
        // modifiers for each previous source.
        for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
             --PrevRegIdx) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
          RegOp.setModifiers(0);
        }
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(),
          isForcedVOP3()));

      if (HasModifiers || Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                    S));
      Parser.Lex();
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}
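
// For example, with Prefix == "offset" the input "offset:16" lexes as
// Identifier, Colon, Integer and leaves Int == 16. A different identifier is
// a NoMatch rather than a hard error, so other parsers may still try the
// token.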

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are at the end of the statement, this is a default argument and we
  // use the default value (0).
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
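
// Hypothetical usage: with Name == "glc", the token "glc" sets Bit = 1 and
// "noglc" sets Bit = 0; if the statement has already ended, the bit silently
// defaults to 0.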

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
    parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
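
// Sketch of the DS forms these callbacks handle (hypothetical operands):
//   ds_write_b32 v1, v2 offset:16                 ; cvtDS, one 16-bit offset
//   ds_read2_b32 v[0:1], v2 offset0:4 offset1:8   ; cvtDSOffset01, two 8-bit
//                                                 ; offsets
// Omitted offsets default to 0 via the OptionalOperand tables above.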

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0x7;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
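
// Worked example: starting from the all-counters-disabled value 0x77f used
// below, "s_waitcnt vmcnt(1) & lgkmcnt(2)" first clears bits [3:0] and ors in
// 1 (0x77f -> 0x771), then clears bits [10:8] and ors in 2 << 8
// (0x771 -> 0x271), so the emitted immediate is 0x271.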

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [10:8]
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token which is sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken())
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // flat atomic instructions don't have a glc argument.
  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  }

  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}
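
// Hypothetical MUBUF input showing where the pieces above come in:
//   buffer_load_dword v0, v1, s[0:3], s2 offen offset:16 glc
// "offen" stays a token with no MCInst operand, "offset:16" must fit in 12
// bits (isMubufOffset), and omitted glc/slc/tfe bits default to 0.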

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}
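
// These helpers fold the asm-level output modifier into the 2-bit omod field:
// "mul:1" -> 0 (identity), "mul:2" -> 1, "mul:4" -> 2, "div:2" -> 3, and
// "div:1" -> 0; any other value is rejected.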

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    // Only query the register class for operands that are actually registers;
    // reading Reg out of the union for an immediate would be undefined.
    if (Src1Op.isRegKind() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                               Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {
    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
  unsigned i = 2;

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"