blob: 930aed1c38f91aeaf7b99bbf8e065d62077ed0b9 [file] [log] [blame]
Tom Stellard9d7ddd52014-11-14 14:08:00 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
2//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellardd7e6f132015-04-08 01:09:26 +000011#include "SIDefines.h"
12#include "llvm/ADT/APFloat.h"
Tom Stellard9d7ddd52014-11-14 14:08:00 +000013#include "llvm/ADT/SmallString.h"
14#include "llvm/ADT/SmallVector.h"
15#include "llvm/ADT/STLExtras.h"
16#include "llvm/ADT/StringSwitch.h"
17#include "llvm/ADT/Twine.h"
18#include "llvm/MC/MCContext.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrInfo.h"
22#include "llvm/MC/MCParser/MCAsmLexer.h"
23#include "llvm/MC/MCParser/MCAsmParser.h"
24#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
25#include "llvm/MC/MCRegisterInfo.h"
26#include "llvm/MC/MCStreamer.h"
27#include "llvm/MC/MCSubtargetInfo.h"
28#include "llvm/MC/MCTargetAsmParser.h"
29#include "llvm/Support/SourceMgr.h"
30#include "llvm/Support/TargetRegistry.h"
31#include "llvm/Support/raw_ostream.h"
Tom Stellardd7e6f132015-04-08 01:09:26 +000032#include "llvm/Support/Debug.h"
Tom Stellard9d7ddd52014-11-14 14:08:00 +000033
34using namespace llvm;
35
36namespace {
37
Tom Stellardd7e6f132015-04-08 01:09:26 +000038struct OptionalOperand;
39
40class AMDGPUOperand : public MCParsedAsmOperand {
41 enum KindTy {
42 Token,
43 Immediate,
44 Register,
45 Expression
46 } Kind;
47
48 SMLoc StartLoc, EndLoc;
49
50public:
51 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
52
53 MCContext *Ctx;
54
55 enum ImmTy {
56 ImmTyNone,
57 ImmTyDSOffset0,
58 ImmTyDSOffset1,
59 ImmTyGDS,
60 ImmTyOffset,
61 ImmTyGLC,
62 ImmTySLC,
63 ImmTyTFE,
64 ImmTyClamp,
65 ImmTyOMod
66 };
67
68 struct TokOp {
69 const char *Data;
70 unsigned Length;
71 };
72
73 struct ImmOp {
74 bool IsFPImm;
75 ImmTy Type;
76 int64_t Val;
77 };
78
79 struct RegOp {
80 unsigned RegNo;
81 int Modifiers;
82 const MCRegisterInfo *TRI;
Tom Stellard7130ef42015-04-23 19:33:48 +000083 bool IsForcedVOP3;
Tom Stellardd7e6f132015-04-08 01:09:26 +000084 };
85
86 union {
87 TokOp Tok;
88 ImmOp Imm;
89 RegOp Reg;
90 const MCExpr *Expr;
91 };
92
93 void addImmOperands(MCInst &Inst, unsigned N) const {
Jim Grosbache9119e42015-05-13 18:37:00 +000094 Inst.addOperand(MCOperand::createImm(getImm()));
Tom Stellardd7e6f132015-04-08 01:09:26 +000095 }
96
97 StringRef getToken() const {
98 return StringRef(Tok.Data, Tok.Length);
99 }
100
101 void addRegOperands(MCInst &Inst, unsigned N) const {
Jim Grosbache9119e42015-05-13 18:37:00 +0000102 Inst.addOperand(MCOperand::createReg(getReg()));
Tom Stellardd7e6f132015-04-08 01:09:26 +0000103 }
104
105 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
106 if (isReg())
107 addRegOperands(Inst, N);
108 else
109 addImmOperands(Inst, N);
110 }
111
112 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
Jim Grosbache9119e42015-05-13 18:37:00 +0000113 Inst.addOperand(MCOperand::createImm(
Tom Stellard7130ef42015-04-23 19:33:48 +0000114 Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
Tom Stellardd7e6f132015-04-08 01:09:26 +0000115 addRegOperands(Inst, N);
116 }
117
118 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
119 if (isImm())
120 addImmOperands(Inst, N);
121 else {
122 assert(isExpr());
Jim Grosbache9119e42015-05-13 18:37:00 +0000123 Inst.addOperand(MCOperand::createExpr(Expr));
Tom Stellardd7e6f132015-04-08 01:09:26 +0000124 }
125 }
126
127 bool defaultTokenHasSuffix() const {
128 StringRef Token(Tok.Data, Tok.Length);
129
130 return Token.endswith("_e32") || Token.endswith("_e64");
131 }
132
133 bool isToken() const override {
134 return Kind == Token;
135 }
136
137 bool isImm() const override {
138 return Kind == Immediate;
139 }
140
141 bool isInlineImm() const {
142 float F = BitsToFloat(Imm.Val);
143 // TODO: Add 0.5pi for VI
144 return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
145 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
146 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
147 }
148
149 bool isDSOffset0() const {
150 assert(isImm());
151 return Imm.Type == ImmTyDSOffset0;
152 }
153
154 bool isDSOffset1() const {
155 assert(isImm());
156 return Imm.Type == ImmTyDSOffset1;
157 }
158
159 int64_t getImm() const {
160 return Imm.Val;
161 }
162
163 enum ImmTy getImmTy() const {
164 assert(isImm());
165 return Imm.Type;
166 }
167
Tom Stellard7130ef42015-04-23 19:33:48 +0000168 bool isRegKind() const {
169 return Kind == Register;
170 }
171
Tom Stellardd7e6f132015-04-08 01:09:26 +0000172 bool isReg() const override {
173 return Kind == Register && Reg.Modifiers == -1;
174 }
175
176 bool isRegWithInputMods() const {
Tom Stellard7130ef42015-04-23 19:33:48 +0000177 return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
Tom Stellardd7e6f132015-04-08 01:09:26 +0000178 }
179
180 void setModifiers(unsigned Mods) {
181 assert(isReg());
182 Reg.Modifiers = Mods;
183 }
184
Tom Stellard7130ef42015-04-23 19:33:48 +0000185 bool hasModifiers() const {
186 assert(isRegKind());
187 return Reg.Modifiers != -1;
188 }
189
Tom Stellardd7e6f132015-04-08 01:09:26 +0000190 unsigned getReg() const override {
191 return Reg.RegNo;
192 }
193
194 bool isRegOrImm() const {
195 return isReg() || isImm();
196 }
197
198 bool isRegClass(unsigned RCID) const {
199 return Reg.TRI->getRegClass(RCID).contains(getReg());
200 }
201
202 bool isSCSrc32() const {
203 return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
204 }
205
206 bool isSSrc32() const {
207 return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
208 }
209
210 bool isSSrc64() const {
211 return isImm() || isInlineImm() ||
212 (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
213 }
214
215 bool isVCSrc32() const {
216 return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
217 }
218
219 bool isVCSrc64() const {
220 return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
221 }
222
223 bool isVSrc32() const {
224 return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
225 }
226
227 bool isVSrc64() const {
228 return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
229 }
230
231 bool isMem() const override {
232 return false;
233 }
234
235 bool isExpr() const {
236 return Kind == Expression;
237 }
238
239 bool isSoppBrTarget() const {
240 return isExpr() || isImm();
241 }
242
243 SMLoc getStartLoc() const override {
244 return StartLoc;
245 }
246
247 SMLoc getEndLoc() const override {
248 return EndLoc;
249 }
250
251 void print(raw_ostream &OS) const override { }
252
253 static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
254 enum ImmTy Type = ImmTyNone,
255 bool IsFPImm = false) {
256 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
257 Op->Imm.Val = Val;
258 Op->Imm.IsFPImm = IsFPImm;
259 Op->Imm.Type = Type;
260 Op->StartLoc = Loc;
261 Op->EndLoc = Loc;
262 return Op;
263 }
264
265 static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
266 bool HasExplicitEncodingSize = true) {
267 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
268 Res->Tok.Data = Str.data();
269 Res->Tok.Length = Str.size();
270 Res->StartLoc = Loc;
271 Res->EndLoc = Loc;
272 return Res;
273 }
274
275 static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
276 SMLoc E,
Tom Stellard7130ef42015-04-23 19:33:48 +0000277 const MCRegisterInfo *TRI,
278 bool ForceVOP3) {
Tom Stellardd7e6f132015-04-08 01:09:26 +0000279 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
280 Op->Reg.RegNo = RegNo;
281 Op->Reg.TRI = TRI;
282 Op->Reg.Modifiers = -1;
Tom Stellard7130ef42015-04-23 19:33:48 +0000283 Op->Reg.IsForcedVOP3 = ForceVOP3;
Tom Stellardd7e6f132015-04-08 01:09:26 +0000284 Op->StartLoc = S;
285 Op->EndLoc = E;
286 return Op;
287 }
288
289 static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
290 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
291 Op->Expr = Expr;
292 Op->StartLoc = S;
293 Op->EndLoc = S;
294 return Op;
295 }
296
297 bool isDSOffset() const;
298 bool isDSOffset01() const;
299 bool isSWaitCnt() const;
300 bool isMubufOffset() const;
301};
302
/// Target assembly parser for the AMDGPU (SI+) backend.
class AMDGPUAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // Encoding size (32 or 64) forced by an explicit _e32/_e64 mnemonic
  // suffix; 0 when no suffix was given.
  unsigned ForcedEncodingSize;
  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

public:
  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(), STI(STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0){

    // If no subtarget was selected, fall back to a default GPU before
    // computing the available-feature mask.
    if (!STI.getFeatureBits()) {
      // Set default features.
      STI.ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  // A _e64 suffix forces the 64-bit (VOP3) encoding.
  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  // Generic helpers for "prefix:value" operands and bare named flags.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                          AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                     AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                   const ArrayRef<OptionalOperand> &OptionalOps,
                                   OperandVector &Operands);


  // DS (LDS/GDS) instructions.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  // s_waitcnt and SOPP branch targets.
  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // MUBUF instructions.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  // MIMG instructions.
  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  // VOP3 instructions.
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};
393
// Table entry describing one optional, named instruction operand (e.g.
// "offset:16" or the bare "gds" flag) and how it is parsed.
struct OptionalOperand {
  const char *Name;                 // Assembly-source name.
  AMDGPUOperand::ImmTy Type;        // Immediate class assigned on match.
  bool IsBit;                       // True: bare flag (parseNamedBit);
                                    // false: "name:value" (parseIntWithPrefix).
  int64_t Default;                  // Value used when the operand is omitted.
  bool (*ConvertResult)(int64_t&);  // Optional validator/converter; returning
                                    // false rejects the parsed value.
};
401
402}
403
Tom Stellardd7e6f132015-04-08 01:09:26 +0000404static unsigned getRegClass(bool IsVgpr, unsigned RegWidth) {
405 if (IsVgpr) {
406 switch (RegWidth) {
407 default: llvm_unreachable("Unknown register width");
408 case 1: return AMDGPU::VGPR_32RegClassID;
409 case 2: return AMDGPU::VReg_64RegClassID;
410 case 3: return AMDGPU::VReg_96RegClassID;
411 case 4: return AMDGPU::VReg_128RegClassID;
412 case 8: return AMDGPU::VReg_256RegClassID;
413 case 16: return AMDGPU::VReg_512RegClassID;
414 }
415 }
416
417 switch (RegWidth) {
418 default: llvm_unreachable("Unknown register width");
419 case 1: return AMDGPU::SGPR_32RegClassID;
420 case 2: return AMDGPU::SGPR_64RegClassID;
421 case 4: return AMDGPU::SReg_128RegClassID;
422 case 8: return AMDGPU::SReg_256RegClassID;
423 case 16: return AMDGPU::SReg_512RegClassID;
424 }
425}
426
427static unsigned getRegForName(const StringRef &RegName) {
428
429 return StringSwitch<unsigned>(RegName)
430 .Case("exec", AMDGPU::EXEC)
431 .Case("vcc", AMDGPU::VCC)
432 .Case("flat_scr", AMDGPU::FLAT_SCR)
433 .Case("m0", AMDGPU::M0)
434 .Case("scc", AMDGPU::SCC)
435 .Case("flat_scr_lo", AMDGPU::FLAT_SCR_LO)
436 .Case("flat_scr_hi", AMDGPU::FLAT_SCR_HI)
437 .Case("vcc_lo", AMDGPU::VCC_LO)
438 .Case("vcc_hi", AMDGPU::VCC_HI)
439 .Case("exec_lo", AMDGPU::EXEC_LO)
440 .Case("exec_hi", AMDGPU::EXEC_HI)
441 .Default(0);
442}
443
Tom Stellard9d7ddd52014-11-14 14:08:00 +0000444bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Tom Stellardd7e6f132015-04-08 01:09:26 +0000445 const AsmToken Tok = Parser.getTok();
446 StartLoc = Tok.getLoc();
447 EndLoc = Tok.getEndLoc();
448 const StringRef &RegName = Tok.getString();
449 RegNo = getRegForName(RegName);
450
451 if (RegNo) {
452 Parser.Lex();
453 return false;
454 }
455
456 // Match vgprs and sgprs
457 if (RegName[0] != 's' && RegName[0] != 'v')
458 return true;
459
460 bool IsVgpr = RegName[0] == 'v';
461 unsigned RegWidth;
462 unsigned RegIndexInClass;
463 if (RegName.size() > 1) {
464 // We have a 32-bit register
465 RegWidth = 1;
466 if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
467 return true;
468 Parser.Lex();
469 } else {
470 // We have a register greater than 32-bits.
471
472 int64_t RegLo, RegHi;
473 Parser.Lex();
474 if (getLexer().isNot(AsmToken::LBrac))
475 return true;
476
477 Parser.Lex();
478 if (getParser().parseAbsoluteExpression(RegLo))
479 return true;
480
481 if (getLexer().isNot(AsmToken::Colon))
482 return true;
483
484 Parser.Lex();
485 if (getParser().parseAbsoluteExpression(RegHi))
486 return true;
487
488 if (getLexer().isNot(AsmToken::RBrac))
489 return true;
490
491 Parser.Lex();
492 RegWidth = (RegHi - RegLo) + 1;
493 if (IsVgpr) {
494 // VGPR registers aren't aligned.
495 RegIndexInClass = RegLo;
496 } else {
497 // SGPR registers are aligned. Max alignment is 4 dwords.
498 RegIndexInClass = RegLo / std::min(RegWidth, 4u);
499 }
500 }
501
502 const MCRegisterInfo *TRC = getContext().getRegisterInfo();
503 unsigned RC = getRegClass(IsVgpr, RegWidth);
504 if (RegIndexInClass > TRC->getRegClass(RC).getNumRegs())
505 return true;
506 RegNo = TRC->getRegClass(RC).getRegister(RegIndexInClass);
507 return false;
508}
509
510unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
511
512 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
513
514 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
515 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
516 return Match_InvalidOperand;
517
518 return Match_Success;
Tom Stellard9d7ddd52014-11-14 14:08:00 +0000519}
520
521
/// Match the parsed operand list against the instruction tables and emit the
/// encoded instruction, or report a diagnostic. Returns true on error.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
    default: break;
    case Match_Success:
      Inst.setLoc(IDLoc);
      Out.EmitInstruction(Inst, STI);
      return false;
    case Match_MissingFeature:
      return Error(IDLoc, "instruction not supported on this GPU");

    case Match_MnemonicFail:
      return Error(IDLoc, "unrecognized instruction mnemonic");

    case Match_InvalidOperand: {
      SMLoc ErrorLoc = IDLoc;
      // ErrorInfo is the index of the offending operand, or ~0ULL if unknown.
      if (ErrorInfo != ~0ULL) {
        if (ErrorInfo >= Operands.size()) {
          if (isForcedVOP3()) {
            // If 64-bit encoding has been forced we can end up with no
            // clamp or omod operands if none of the registers have modifiers,
            // so we need to add these to the operand list.
            AMDGPUOperand &LastOp =
                ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
            if (LastOp.isRegKind() ||
               (LastOp.isImm() &&
                LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
              SMLoc S = Parser.getTok().getLoc();
              Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                 AMDGPUOperand::ImmTyClamp));
              Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                 AMDGPUOperand::ImmTyOMod));
              // Retry the match with default clamp/omod appended. Only a
              // successful rematch (Res == false) is propagated; on failure
              // we fall through to the "too few operands" diagnostic below.
              bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                                 Out, ErrorInfo,
                                                 MatchingInlineAsm);
              if (!Res)
                return Res;
            }

          }
          return Error(IDLoc, "too few operands for instruction");
        }

        ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
        if (ErrorLoc == SMLoc())
          ErrorLoc = IDLoc;
      }
      return Error(ErrorLoc, "invalid operand for instruction");
    }
  }
  llvm_unreachable("Implement any new match types added!");
}
579
// No target-specific assembler directives are handled yet; this always
// returns true (the not-handled/error convention) for every directive.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  return true;
}
583
Tom Stellardd7e6f132015-04-08 01:09:26 +0000584static bool operandsHaveModifiers(const OperandVector &Operands) {
585
586 for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
587 const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
Tom Stellard7130ef42015-04-23 19:33:48 +0000588 if (Op.isRegKind() && Op.hasModifiers())
Tom Stellardd7e6f132015-04-08 01:09:26 +0000589 return true;
590 if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
591 Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
592 return true;
593 }
594 return false;
595}
596
/// Parse one operand: first via the auto-generated custom parsers, then as a
/// (possibly negated/abs-wrapped) integer, float, register, or raw token.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // A leading '-' or opening '|' becomes a neg/abs source modifier if the
  // operand turns out to be a register.
  bool Negate = false, Abs = false;
  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      // Immediates must be representable in 32 bits (sign-extended check).
      APInt IntVal32(32, IntVal);
      if (IntVal32.getSExtValue() != IntVal) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      IntVal = IntVal32.getSExtValue();
      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precisions floating-point
      // value is used. I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      // Store the single-precision bit pattern as the immediate value.
      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::Identifier: {
      SMLoc S, E;
      unsigned RegNo;
      if (!ParseRegister(RegNo, S, E)) {

        bool HasModifiers = operandsHaveModifiers(Operands);
        unsigned Modifiers = 0;

        if (Negate)
          Modifiers |= 0x1;   // neg bit

        if (Abs) {
          // Require the closing '|'.
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;   // abs bit
        }

        if (Modifiers && !HasModifiers) {
          // We are adding a modifier to src1 or src2 and previous sources
          // don't have modifiers, so we need to go back and empty modifers
          // for each previous source.
          // NOTE(review): this loop assumes Operands[2..] are all registers;
          // setModifiers asserts isReg(), so a prior immediate operand would
          // trip the assert — confirm this invariant holds for VOP3 forms.
          for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
               --PrevRegIdx) {

            AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
            RegOp.setModifiers(0);
          }
        }


        Operands.push_back(AMDGPUOperand::CreateReg(
            RegNo, S, E, getContext().getRegisterInfo(),
            isForcedVOP3()));

        if (HasModifiers || Modifiers) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
          RegOp.setModifiers(Modifiers);

        }
      } else {
        // Not a register: keep the raw token; the matcher may still accept it.
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}
708
/// Parse a full instruction: mnemonic plus operands up to end of statement,
/// then one more pass to append defaults for omitted optional operands.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  // An explicit suffix pins the encoding size; checkTargetMatchPredicate
  // later rejects matches of the other size.
  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                                "failed parsing operand.");
      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                              "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}
749
750//===----------------------------------------------------------------------===//
Tom Stellardd7e6f132015-04-08 01:09:26 +0000751// Utility functions
752//===----------------------------------------------------------------------===//
753
754AMDGPUAsmParser::OperandMatchResultTy
755AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
756 int64_t Default) {
757
758 // We are at the end of the statement, and this is a default argument, so
759 // use a default value.
760 if (getLexer().is(AsmToken::EndOfStatement)) {
761 Int = Default;
762 return MatchOperand_Success;
763 }
764
765 switch(getLexer().getKind()) {
766 default: return MatchOperand_NoMatch;
767 case AsmToken::Identifier: {
768 StringRef OffsetName = Parser.getTok().getString();
769 if (!OffsetName.equals(Prefix))
770 return MatchOperand_NoMatch;
771
772 Parser.Lex();
773 if (getLexer().isNot(AsmToken::Colon))
774 return MatchOperand_ParseFail;
775
776 Parser.Lex();
777 if (getLexer().isNot(AsmToken::Integer))
778 return MatchOperand_ParseFail;
779
780 if (getParser().parseAbsoluteExpression(Int))
781 return MatchOperand_ParseFail;
782 break;
783 }
784 }
785 return MatchOperand_Success;
786}
787
788AMDGPUAsmParser::OperandMatchResultTy
789AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
790 enum AMDGPUOperand::ImmTy ImmTy) {
791
792 SMLoc S = Parser.getTok().getLoc();
793 int64_t Offset = 0;
794
795 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
796 if (Res != MatchOperand_Success)
797 return Res;
798
799 Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
800 return MatchOperand_Success;
801}
802
803AMDGPUAsmParser::OperandMatchResultTy
804AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
805 enum AMDGPUOperand::ImmTy ImmTy) {
806 int64_t Bit = 0;
807 SMLoc S = Parser.getTok().getLoc();
808
809 // We are at the end of the statement, and this is a default argument, so
810 // use a default value.
811 if (getLexer().isNot(AsmToken::EndOfStatement)) {
812 switch(getLexer().getKind()) {
813 case AsmToken::Identifier: {
814 StringRef Tok = Parser.getTok().getString();
815 if (Tok == Name) {
816 Bit = 1;
817 Parser.Lex();
818 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
819 Bit = 0;
820 Parser.Lex();
821 } else {
822 return MatchOperand_NoMatch;
823 }
824 break;
825 }
826 default:
827 return MatchOperand_NoMatch;
828 }
829 }
830
831 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
832 return MatchOperand_Success;
833}
834
835static bool operandsHasOptionalOp(const OperandVector &Operands,
836 const OptionalOperand &OOp) {
837 for (unsigned i = 0; i < Operands.size(); i++) {
838 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
839 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
840 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
841 return true;
842
843 }
844 return false;
845}
846
847AMDGPUAsmParser::OperandMatchResultTy
848AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
849 OperandVector &Operands) {
850 SMLoc S = Parser.getTok().getLoc();
851 for (const OptionalOperand &Op : OptionalOps) {
852 if (operandsHasOptionalOp(Operands, Op))
853 continue;
854 AMDGPUAsmParser::OperandMatchResultTy Res;
855 int64_t Value;
856 if (Op.IsBit) {
857 Res = parseNamedBit(Op.Name, Operands, Op.Type);
858 if (Res == MatchOperand_NoMatch)
859 continue;
860 return Res;
861 }
862
863 Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
864
865 if (Res == MatchOperand_NoMatch)
866 continue;
867
868 if (Res != MatchOperand_Success)
869 return Res;
870
871 if (Op.ConvertResult && !Op.ConvertResult(Value)) {
872 return MatchOperand_ParseFail;
873 }
874
875 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
876 return MatchOperand_Success;
877 }
878 return MatchOperand_NoMatch;
879}
880
881//===----------------------------------------------------------------------===//
882// ds
883//===----------------------------------------------------------------------===//
884
// Optional operands accepted by single-offset DS instructions.
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

// Optional operands accepted by dual-offset (offset0/offset1) DS
// instructions.
static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};
895
// Optional-operand parser for single-offset DS instructions (offset, gds).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}
// Optional-operand parser for dual-offset DS instructions
// (offset0, offset1, gds).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}
904
905AMDGPUAsmParser::OperandMatchResultTy
906AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
907 SMLoc S = Parser.getTok().getLoc();
908 AMDGPUAsmParser::OperandMatchResultTy Res =
909 parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
910 if (Res == MatchOperand_NoMatch) {
911 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
912 AMDGPUOperand::ImmTyOffset));
913 Res = MatchOperand_Success;
914 }
915 return Res;
916}
917
// DS single-offset fields are unsigned 16-bit values.
bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}
921
// DS offset0/offset1 fields are unsigned 8-bit values.
bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}
925
/// Converter for dual-offset DS instructions: registers are appended in
/// parse order, then offset0/offset1/gds in the fixed order the encoding
/// expects, followed by the implicit m0 register.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  // Operands[0] is the mnemonic token, so start at 1.
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // ParseInstruction appends defaults for omitted optionals, so these
  // lookups are expected to yield valid operand indices.
  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
953
/// Converter for single-offset DS instructions: registers in parse order,
/// then offset, then gds (omitted when a bare "gds" token marked the
/// instruction as GDS-only), followed by the implicit m0 register.
void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  // Operands[0] is the mnemonic token, so start at 1.
  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // A bare "gds" token (as opposed to a parsed gds immediate) appears to
    // mark a form with no gds field in its encoding — verify against the
    // instruction definitions.
    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
986
987
988//===----------------------------------------------------------------------===//
Tom Stellard9d7ddd52014-11-14 14:08:00 +0000989// s_waitcnt
990//===----------------------------------------------------------------------===//
991
992bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
993 StringRef CntName = Parser.getTok().getString();
994 int64_t CntVal;
995
996 Parser.Lex();
997 if (getLexer().isNot(AsmToken::LParen))
998 return true;
999
1000 Parser.Lex();
1001 if (getLexer().isNot(AsmToken::Integer))
1002 return true;
1003
1004 if (getParser().parseAbsoluteExpression(CntVal))
1005 return true;
1006
1007 if (getLexer().isNot(AsmToken::RParen))
1008 return true;
1009
1010 Parser.Lex();
1011 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1012 Parser.Lex();
1013
1014 int CntShift;
1015 int CntMask;
1016
1017 if (CntName == "vmcnt") {
1018 CntMask = 0xf;
1019 CntShift = 0;
1020 } else if (CntName == "expcnt") {
1021 CntMask = 0x7;
1022 CntShift = 4;
1023 } else if (CntName == "lgkmcnt") {
1024 CntMask = 0x7;
1025 CntShift = 8;
1026 } else {
1027 return true;
1028 }
1029
1030 IntVal &= ~(CntMask << CntShift);
1031 IntVal |= (CntVal << CntShift);
1032 return false;
1033}
1034
1035AMDGPUAsmParser::OperandMatchResultTy
1036AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1037 // Disable all counters by default.
1038 // vmcnt [3:0]
1039 // expcnt [6:4]
1040 // lgkmcnt [10:8]
1041 int64_t CntVal = 0x77f;
Tom Stellardd7e6f132015-04-08 01:09:26 +00001042 SMLoc S = Parser.getTok().getLoc();
Tom Stellard9d7ddd52014-11-14 14:08:00 +00001043
1044 switch(getLexer().getKind()) {
1045 default: return MatchOperand_ParseFail;
1046 case AsmToken::Integer:
1047 // The operand can be an integer value.
1048 if (getParser().parseAbsoluteExpression(CntVal))
1049 return MatchOperand_ParseFail;
1050 break;
1051
1052 case AsmToken::Identifier:
1053 do {
1054 if (parseCnt(CntVal))
1055 return MatchOperand_ParseFail;
1056 } while(getLexer().isNot(AsmToken::EndOfStatement));
1057 break;
1058 }
Tom Stellardd7e6f132015-04-08 01:09:26 +00001059 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
Tom Stellard9d7ddd52014-11-14 14:08:00 +00001060 return MatchOperand_Success;
1061}
1062
// s_waitcnt accepts any immediate; the value itself is assembled and
// range-checked by parseSWaitCntOps/parseCnt.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
1066
Tom Stellardd7e6f132015-04-08 01:09:26 +00001067//===----------------------------------------------------------------------===//
1068// sopp branch targets
1069//===----------------------------------------------------------------------===//
1070
1071AMDGPUAsmParser::OperandMatchResultTy
1072AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1073 SMLoc S = Parser.getTok().getLoc();
1074
1075 switch (getLexer().getKind()) {
1076 default: return MatchOperand_ParseFail;
1077 case AsmToken::Integer: {
1078 int64_t Imm;
1079 if (getParser().parseAbsoluteExpression(Imm))
1080 return MatchOperand_ParseFail;
1081 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1082 return MatchOperand_Success;
1083 }
1084
1085 case AsmToken::Identifier:
1086 Operands.push_back(AMDGPUOperand::CreateExpr(
1087 MCSymbolRefExpr::Create(getContext().GetOrCreateSymbol(
1088 Parser.getTok().getString()), getContext()), S));
1089 Parser.Lex();
1090 return MatchOperand_Success;
1091 }
1092}
1093
1094//===----------------------------------------------------------------------===//
1095// mubuf
1096//===----------------------------------------------------------------------===//
1097
// Optional operands accepted by MUBUF instructions, tried in this order.
// Each entry pairs the textual name with the AMDGPUOperand immediate type
// it produces; the remaining fields are the OptionalOperand defaults and
// converter hook (struct defined earlier in this file — NOTE(review):
// field semantics inferred from usage; confirm against the declaration).
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1104
// Parse any of the optional MUBUF operands (offset/glc/slc/tfe).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}
1109
// Parse an "offset:<n>" operand as an untyped immediate.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}
1114
// Parse the optional "glc" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}
1119
// Parse the optional "slc" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}
1124
// Parse the optional "tfe" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}
1129
1130bool AMDGPUOperand::isMubufOffset() const {
1131 return isImm() && isUInt<12>(getImm());
1132}
1133
1134void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
1135 const OperandVector &Operands) {
1136 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1137
1138 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1139 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1140
1141 // Add the register arguments
1142 if (Op.isReg()) {
1143 Op.addRegOperands(Inst, 1);
1144 continue;
1145 }
1146
1147 // Handle the case where soffset is an immediate
1148 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
1149 Op.addImmOperands(Inst, 1);
1150 continue;
1151 }
1152
1153 // Handle tokens like 'offen' which are sometimes hard-coded into the
1154 // asm string. There are no MCInst operands for these.
1155 if (Op.isToken()) {
1156 continue;
1157 }
1158 assert(Op.isImm());
1159
1160 // Handle optional arguments
1161 OptionalIdx[Op.getImmTy()] = i;
1162 }
1163
1164 assert(OptionalIdx.size() == 4);
1165
1166 unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
1167 unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
1168 unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
1169 unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];
1170
1171 ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
1172 ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
1173 ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
1174 ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
1175}
1176
1177//===----------------------------------------------------------------------===//
1178// mimg
1179//===----------------------------------------------------------------------===//
1180
// Parse the MIMG "dmask:<n>" operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}
1185
// Parse the optional MIMG "unorm" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}
1190
// Parse the optional MIMG "r128" bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}
1195
1196//===----------------------------------------------------------------------===//
1197// vop3
1198//===----------------------------------------------------------------------===//
1199
/// Convert an output-modifier "mul" factor to its hardware encoding:
/// mul:1 -> 0, mul:2 -> 1, mul:4 -> 2.
/// \returns false (leaving \p Mul untouched) for any other factor.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
    Mul = 0;
    return true;
  case 2:
    Mul = 1;
    return true;
  case 4:
    Mul = 2;
    return true;
  default:
    return false;
  }
}
1207
/// Convert an output-modifier "div" factor to its hardware encoding:
/// div:1 -> 0, div:2 -> 3.
/// \returns false (leaving \p Div untouched) for any other factor.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
1221
// Optional operands accepted by VOP3 instructions, tried in this order.
// "mul" and "div" share ImmTyOMod and are normalized to the hardware
// encoding by their converter functions (ConvertOmodMul/ConvertOmodDiv).
static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};
1227
1228static bool isVOP3(OperandVector &Operands) {
1229 if (operandsHaveModifiers(Operands))
1230 return true;
1231
1232 AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
1233
1234 if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
1235 return true;
1236
1237 if (Operands.size() >= 5)
1238 return true;
1239
1240 if (Operands.size() > 3) {
1241 AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
1242 if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
1243 Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
1244 return true;
1245 }
1246 return false;
1247}
1248
1249AMDGPUAsmParser::OperandMatchResultTy
1250AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
1251
1252 // The value returned by this function may change after parsing
1253 // an operand so store the original value here.
1254 bool HasModifiers = operandsHaveModifiers(Operands);
1255
1256 bool IsVOP3 = isVOP3(Operands);
1257 if (HasModifiers || IsVOP3 ||
1258 getLexer().isNot(AsmToken::EndOfStatement) ||
1259 getForcedEncodingSize() == 64) {
1260
1261 AMDGPUAsmParser::OperandMatchResultTy Res =
1262 parseOptionalOps(VOP3OptionalOps, Operands);
1263
1264 if (!HasModifiers && Res == MatchOperand_Success) {
1265 // We have added a modifier operation, so we need to make sure all
1266 // previous register operands have modifiers
1267 for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
1268 AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
1269 if (Op.isReg())
1270 Op.setModifiers(0);
1271 }
1272 }
1273 return Res;
1274 }
1275 return MatchOperand_NoMatch;
1276}
1277
1278void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
1279 ((AMDGPUOperand &)*Operands[1]).addRegOperands(Inst, 1);
1280 unsigned i = 2;
1281
1282 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1283
1284 if (operandsHaveModifiers(Operands)) {
1285 for (unsigned e = Operands.size(); i != e; ++i) {
1286 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1287
1288 if (Op.isRegWithInputMods()) {
1289 ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
1290 continue;
1291 }
1292 OptionalIdx[Op.getImmTy()] = i;
1293 }
1294
1295 unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
1296 unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];
1297
1298 ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
1299 ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
1300 } else {
1301 for (unsigned e = Operands.size(); i != e; ++i)
1302 ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
1303 }
1304}
1305
/// Force static initialization: register this asm parser implementation
/// with both the R600 and GCN targets.
extern "C" void LLVMInitializeR600AsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
1311
1312#define GET_REGISTER_MATCHER
1313#define GET_MATCHER_IMPLEMENTATION
1314#include "AMDGPUGenAsmMatcher.inc"
1315