//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "AMDKernelCodeT.h"
#include "SIDefines.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isReg())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(
        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
    addRegOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

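  // Inline immediates are the operand values the hardware can encode
  // directly in the instruction word, without a 32-bit literal: integers in
  // [-16, 64] and the float set {0.0, +/-0.5, +/-1.0, +/-2.0, +/-4.0}.
  // Illustrative: 1.0 is an inline immediate, 1.5 is not.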
  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;
  }

  bool isRegWithInputMods() const {
    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg());
    Reg.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind());
    return Reg.Modifiers != -1;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSCSrc64() const {
    return (isReg() && isRegClass(AMDGPU::SReg_64RegClassID)) || isInlineImm();
  }

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      OS << getImm();
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                          bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.Modifiers = -1;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr,
                                                   SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isVI() const {
    return getSTI().getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      STI.ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands,
                                    StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

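// Register operand spellings this parser accepts (examples are
// illustrative):
//   named registers:    exec, vcc, m0, flat_scratch, vcc_lo, ...
//   single 32-bit:      v0, s101
//   multi-dword ranges: v[0:1], s[4:7] (SGPR ranges must be size-aligned)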
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return false;
  }

  // Match vgprs and sgprs.
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register.
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register wider than 32 bits, e.g. s[0:3].
    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

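// An explicit _e32/_e64 suffix on the mnemonic forces the 32-bit or 64-bit
// (VOP3) encoding, e.g. (illustrative) "v_add_f32_e64 v0, v1, v2" must
// match a VOP3 form; a candidate whose encoding contradicts the forced size
// is rejected below.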
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                               AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                               AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

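// Illustrative forms of the directives handled below:
//   .hsa_code_object_version 1,0
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// The ISA directive may also be written with no arguments, in which case
// the version is derived from the targeted subtarget.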
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

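// An illustrative .amd_kernel_code_t block as this parser accepts it:
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     machine_kind = 1
//   .end_amd_kernel_code_t
// Each "key = value" pair sits on its own line and is dispatched by
// ParseAMDKernelCodeTValue above; unrecognized keys are an error.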
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (!isVI())
    return true;

  // VI only has 102 SGPRs, so make sure we aren't trying to use the two
  // additional SGPRs that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double-precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      bool HasModifiers = operandsHaveModifiers(Operands);
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      if (Modifiers && !HasModifiers) {
        // We are adding a modifier to src1 or src2 and previous sources
        // don't have modifiers, so we need to go back and add empty
        // modifiers for each previous source.
        for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
             --PrevRegIdx) {
          AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
          RegOp.setModifiers(0);
        }
      }

      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(),
          isForcedVOP3()));

      if (HasModifiers || Modifiers) {
        AMDGPUOperand &RegOp =
            ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      Operands.push_back(
          AMDGPUOperand::CreateToken(Parser.getTok().getString(), S));
      Parser.Lex();
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic.
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

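// "Integer with prefix" operands use name:value syntax, e.g. (illustrative)
// "offset:16" or "dmask:0xf". Reaching end-of-statement first means the
// operand was omitted, and the caller-supplied default is used instead.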
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

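// Named single-bit operands parse as the bare name or its "no"-prefixed
// negation, e.g. (illustrative) "glc" yields 1 and "noglc" yields 0;
// omitting the token entirely leaves the bit at its default of 0.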
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are at the end of the statement, this is a default argument, and
  // the bit keeps its default value of 0.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

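// DS instructions take optional offset and gds operands, e.g.
// (illustrative) "ds_read_b32 v0, v1 offset:16 gds", while the read2/write2
// forms split the offset: "ds_read2_b32 v[0:1], v2 offset0:4 offset1:8".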
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

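// s_waitcnt packs three counters into its simm16 operand: vmcnt in bits
// [3:0], expcnt in [6:4], lgkmcnt in [10:8]. E.g. (illustrative)
// "s_waitcnt vmcnt(0) & lgkmcnt(0)" starts from the all-ones default 0x77f
// and clears those two fields, producing 0x70.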
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0x7;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt   [3:0]
  // expcnt  [6:4]
  // lgkmcnt [10:8]
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

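// FLAT instructions accept optional cache-control bits, e.g. (illustrative)
// "flat_load_dword v0, v[1:2] glc slc". The atomic variants hard-code glc
// into the mnemonic's asm string when they return data, so only slc/tfe
// remain parseable for them.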
static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the 'glc' token, which is sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken())
      continue;

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Flat atomic instructions don't have a glc argument.
  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  }

  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

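// MUBUF loads and stores take an unsigned 12-bit byte offset plus the same
// glc/slc/tfe bits, e.g. (illustrative)
// "buffer_load_dword v0, s[0:3], s4 offset:8 glc".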
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments.
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate.
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments.
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

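// SMRD offsets: the 8-bit immediate form is always available; values that
// do not fit in 8 bits fall back to a 32-bit literal, which only CI
// supports. E.g. (illustrative) an offset of 0x1ff needs the literal form.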
bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI, and we only want to use them
  // when the offset is greater than 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

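// The VOP3 output modifier (omod) is a 2-bit field: 0 = none, 1 = multiply
// by 2, 2 = multiply by 4, 3 = divide by 2. The converters below map parsed
// "mul:N" / "div:N" values onto that encoding, e.g. mul:2 -> 1, mul:4 -> 2,
// div:2 -> 3, and reject anything else.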
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                            Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  unsigned i = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  if (Desc.getNumDefs() > 0) {
    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
  }

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"