//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

10#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000011#include "MCTargetDesc/AMDGPUTargetStreamer.h"
12#include "Utils/AMDGPUBaseInfo.h"
Tom Stellardff7416b2015-06-26 21:58:31 +000013#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000014#include "SIDefines.h"
15#include "llvm/ADT/APFloat.h"
16#include "llvm/ADT/SmallString.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/STLExtras.h"
19#include "llvm/ADT/StringSwitch.h"
20#include "llvm/ADT/Twine.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCExpr.h"
23#include "llvm/MC/MCInst.h"
24#include "llvm/MC/MCInstrInfo.h"
25#include "llvm/MC/MCParser/MCAsmLexer.h"
26#include "llvm/MC/MCParser/MCAsmParser.h"
27#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
28#include "llvm/MC/MCRegisterInfo.h"
29#include "llvm/MC/MCStreamer.h"
30#include "llvm/MC/MCSubtargetInfo.h"
31#include "llvm/MC/MCTargetAsmParser.h"
32#include "llvm/Support/SourceMgr.h"
33#include "llvm/Support/TargetRegistry.h"
34#include "llvm/Support/raw_ostream.h"
35#include "llvm/Support/Debug.h"
36
37using namespace llvm;
38
39namespace {
40
struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isReg())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(
        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
    addRegOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;
  }

  bool isRegWithInputMods() const {
    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg());
    Reg.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind());
    return Reg.Modifiers != -1;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSCSrc64() const {
    return (isReg() && isRegClass(AMDGPU::SReg_64RegClassID)) || isInlineImm();
  }

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      OS << getImm();
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                          bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.Modifiers = -1;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr,
                                                   SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options), STI(STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    if (STI.getFeatureBits().none()) {
      // Set default features.
      STI.ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                  const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

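// Map a register width in 32-bit dwords to the corresponding VGPR or SGPR
// register class ID.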
static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
    default: llvm_unreachable("Unknown register width");
    case 1: return AMDGPU::VGPR_32RegClassID;
    case 2: return AMDGPU::VReg_64RegClassID;
    case 3: return AMDGPU::VReg_96RegClassID;
    case 4: return AMDGPU::VReg_128RegClassID;
    case 8: return AMDGPU::VReg_256RegClassID;
    case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
  default: llvm_unreachable("Unknown register width");
  case 1: return AMDGPU::SGPR_32RegClassID;
  case 2: return AMDGPU::SGPR_64RegClassID;
  case 4: return AMDGPU::SReg_128RegClassID;
  case 8: return AMDGPU::SReg_256RegClassID;
  case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scr", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

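// Registers are written either as a named special register ("vcc", "exec"),
// a single 32-bit register ("v0", "s7"), or a bracketed dword range
// ("s[0:1]", "v[4:7]"). Illustrative example: "s[4:7]" is a four-dword
// tuple, and since SGPR tuples are allocated at aligned boundaries it
// resolves to index 1 of the 128-bit SGPR class.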
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return false;
  }

  // Match vgprs and sgprs.
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register.
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register that is wider than 32 bits.
    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      RegIndexInClass = RegLo / std::min(RegWidth, 4u);
    }
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  const MCRegisterClass RC = TRI->getRegClass(getRegClass(IsVgpr, RegWidth));
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return false;
}

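// An explicit "_e32" or "_e64" suffix on the mnemonic forces a 32-bit or
// 64-bit encoding; reject matches whose VOP3 flag disagrees with the forced
// encoding size.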
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, STI);
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                 AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                 AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

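// Parses a directive of the form (illustrative):
//   .hsa_code_object_version 1,0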
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

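// Parses a directive of the form (illustrative):
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// With no arguments, the ISA version of the targeted GPU is emitted instead.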
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(STI.getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

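// Parses a block of key = value lines between .amd_kernel_code_t and
// .end_amd_kernel_code_t, one value per line, e.g. (illustrative):
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     kernel_code_version_minor = 0
//   .end_amd_kernel_code_t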
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, STI.getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

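  // VOP3 source modifiers: a leading '-' negates the operand and a '|...|'
  // pair takes its absolute value, e.g. "v_add_f32 v0, -v1, |v2|"
  // (illustrative syntax; the two modifiers may also be combined).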
  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used.  I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      bool HasModifiers = operandsHaveModifiers(Operands);
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      if (Modifiers && !HasModifiers) {
        // We are adding a modifier to src1 or src2 and previous sources
        // don't have modifiers, so we need to go back and add empty
        // modifiers for each previous source.
        for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
             --PrevRegIdx) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
          RegOp.setModifiers(0);
        }
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(),
          isForcedVOP3()));

      if (HasModifiers || Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                    S));
      Parser.Lex();
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic.
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

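// Parses an integer with a named prefix, e.g. "offset:4096" (illustrative);
// if the statement has already ended, the caller's default value is used.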
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

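// Parses a named bit flag such as "glc"; the "no"-prefixed form ("noglc")
// explicitly clears it (illustrative names, matching the operand tables
// further below).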
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are at the end of the statement, this is a default argument, so
  // leave the bit at its default value of 0.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

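// DS instructions take either a single 16-bit "offset:N" immediate or, for
// the two-address read2/write2 forms, a pair of 8-bit "offset0:N offset1:M"
// immediates, plus an optional "gds" flag (widths per the isDSOffset*
// predicates below).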
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

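// s_waitcnt accepts either a raw immediate or named counters joined by '&'
// or ',', e.g. "s_waitcnt vmcnt(0) & lgkmcnt(0)" (illustrative); counters
// left unmentioned stay at their disabled (maximum) value.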
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0x7;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt   [3:0]
  // expcnt  [6:4]
  // lgkmcnt [10:8]
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

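// A branch target is either an immediate offset or a label, e.g.
// "s_branch BB0_1" (illustrative); labels become symbol expressions that
// are resolved later.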
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token which is sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken())
      continue;

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  // flat atomic instructions don't have a glc argument.
  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  }

  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

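// MUBUF instructions accept an unsigned 12-bit "offset:N" plus optional
// "glc", "slc" and "tfe" bits (width per isMubufOffset below).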
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate.
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI.  We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI, and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

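// The VOP3 output-modifier (omod) field encodes 0 = none, 1 = multiply by 2,
// 2 = multiply by 4, and 3 = divide by 2; the converters below fold "mul:2",
// "mul:4" and "div:2" into those encodings ("mul:1" and "div:1" both mean no
// modification).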
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

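// Heuristic for whether an instruction must use the 64-bit VOP3 encoding:
// any operand carries modifiers, the destination is a 64-bit SGPR pair (a
// VOP3-style carry-out), there are three source operands, or src1 is an
// SGPR (the 32-bit encodings only allow an SGPR in src0).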
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                            Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {
    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operand, so we need to make sure all
      // previous register operands have modifiers too.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

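// Build a VOP3 MCInst: when modifiers are present, each source becomes an
// <input-modifier, register> operand pair, followed by the trailing clamp
// and omod immediates; otherwise operands are appended unchanged.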
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  unsigned i = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  if (Desc.getNumDefs() > 0) {
    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
  }

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"