//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "AMDKernelCodeT.h"
#include "SIDefines.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Debug.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    bool IsForcedVOP3;
  };

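  // Discriminated union: exactly one member is active, selected by Kind
  // (Tok for Token, Imm for Immediate, Reg for Register, Expr for Expression).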
  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(getReg()));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isReg())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(
                        Reg.Modifiers == -1 ? 0 : Reg.Modifiers));
    addRegOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlineImm() const {
    float F = BitsToFloat(Imm.Val);
    // TODO: Add 0.5pi for VI
    return isImm() && ((Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
           F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0));
  }

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == -1;
  }

  bool isRegWithInputMods() const {
    return Kind == Register && (Reg.IsForcedVOP3 || Reg.Modifiers != -1);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg());
    Reg.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind());
    return Reg.Modifiers != -1;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSSrc64() const {
    return isImm() || isInlineImm() ||
           (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSCSrc64() const {
    return (isReg() && isRegClass(AMDGPU::SReg_64RegClassID)) || isInlineImm();
  }

  bool isVCSrc32() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlineImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVSrc64() const {
    return isImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      OS << getImm();
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                          bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.Modifiers = -1;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr,
                                                   SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isVI() const {
    return getSTI().getFeatureBits()[AMDGPU::FeatureVolcanicIslands];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                                      AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
                                  const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

}

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
      default: return -1;
      case 1: return AMDGPU::VGPR_32RegClassID;
      case 2: return AMDGPU::VReg_64RegClassID;
      case 3: return AMDGPU::VReg_96RegClassID;
      case 4: return AMDGPU::VReg_128RegClassID;
      case 8: return AMDGPU::VReg_256RegClassID;
      case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::SGPR_32RegClassID;
    case 2: return AMDGPU::SGPR_64RegClassID;
    case 4: return AMDGPU::SReg_128RegClassID;
    case 8: return AMDGPU::SReg_256RegClassID;
    case 16: return AMDGPU::SReg_512RegClassID;
  }
}

static unsigned getRegForName(StringRef RegName) {
  return StringSwitch<unsigned>(RegName)
    .Case("exec", AMDGPU::EXEC)
    .Case("vcc", AMDGPU::VCC)
    .Case("flat_scratch", AMDGPU::FLAT_SCR)
    .Case("m0", AMDGPU::M0)
    .Case("scc", AMDGPU::SCC)
    .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
    .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
    .Case("vcc_lo", AMDGPU::VCC_LO)
    .Case("vcc_hi", AMDGPU::VCC_HI)
    .Case("exec_lo", AMDGPU::EXEC_LO)
    .Case("exec_hi", AMDGPU::EXEC_HI)
    .Default(0);
}

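// Registers are accepted either as named special registers (see
// getRegForName above), as a single 32-bit GPR such as "v0" or "s7", or as a
// bracketed range such as "s[4:7]" or "v[2:3]". For SGPR ranges the base
// index must be aligned to min(width, 4) dwords. The example register names
// here are illustrative, not taken from this file.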
bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
                                    SMLoc &EndLoc) {
  const AsmToken Tok = Parser.getTok();
  StartLoc = Tok.getLoc();
  EndLoc = Tok.getEndLoc();
  StringRef RegName = Tok.getString();
  RegNo = getRegForName(RegName);

  if (RegNo) {
    Parser.Lex();
    return false;
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return true;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return true;
    Parser.Lex();
  } else {
    // We have a register wider than 32 bits.

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return true;

    if (getLexer().isNot(AsmToken::Colon))
      return true;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return true;

    if (getLexer().isNot(AsmToken::RBrac))
      return true;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords.
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return true;

      RegIndexInClass = RegLo / Size;
    }
  }

  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return true;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return true;

  RegNo = RC.getRegister(RegIndexInClass);
  return !subtargetHasRegister(*TRI, RegNo);
}

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        if (isForcedVOP3()) {
          // If 64-bit encoding has been forced we can end up with no
          // clamp or omod operands if none of the registers have modifiers,
          // so we need to add these to the operand list.
          AMDGPUOperand &LastOp =
              ((AMDGPUOperand &)*Operands[Operands.size() - 1]);
          if (LastOp.isRegKind() ||
              (LastOp.isImm() &&
               LastOp.getImmTy() != AMDGPUOperand::ImmTyNone)) {
            SMLoc S = Parser.getTok().getLoc();
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                  AMDGPUOperand::ImmTyClamp));
            Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                  AMDGPUOperand::ImmTyOMod));
            bool Res = MatchAndEmitInstruction(IDLoc, Opcode, Operands,
                                               Out, ErrorInfo,
                                               MatchingInlineAsm);
            if (!Res)
              return Res;
          }
        }
        return Error(IDLoc, "too few operands for instruction");
      }

      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

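// Parses the two integer arguments of the directive, e.g. (illustrative;
// version numbers are placeholders):
//   .hsa_code_object_version 1,0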
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

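// With no arguments this emits the ISA version of the targeted GPU; with
// arguments it expects major, minor, stepping, vendor and arch, e.g.
// (illustrative values):
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"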
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  if (getLexer().isNot(AsmToken::Equal))
    return TokError("expected '='");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("amd_kernel_code_t values must be integers");

  uint64_t Value = getLexer().getTok().getIntVal();
  Lex();

  if (ID == "kernel_code_version_major")
    Header.amd_kernel_code_version_major = Value;
  else if (ID == "kernel_code_version_minor")
    Header.amd_kernel_code_version_minor = Value;
  else if (ID == "machine_kind")
    Header.amd_machine_kind = Value;
  else if (ID == "machine_version_major")
    Header.amd_machine_version_major = Value;
  else if (ID == "machine_version_minor")
    Header.amd_machine_version_minor = Value;
  else if (ID == "machine_version_stepping")
    Header.amd_machine_version_stepping = Value;
  else if (ID == "kernel_code_entry_byte_offset")
    Header.kernel_code_entry_byte_offset = Value;
  else if (ID == "kernel_code_prefetch_byte_size")
    Header.kernel_code_prefetch_byte_size = Value;
  else if (ID == "max_scratch_backing_memory_byte_size")
    Header.max_scratch_backing_memory_byte_size = Value;
  else if (ID == "compute_pgm_rsrc1_vgprs")
    Header.compute_pgm_resource_registers |= S_00B848_VGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_sgprs")
    Header.compute_pgm_resource_registers |= S_00B848_SGPRS(Value);
  else if (ID == "compute_pgm_rsrc1_priority")
    Header.compute_pgm_resource_registers |= S_00B848_PRIORITY(Value);
  else if (ID == "compute_pgm_rsrc1_float_mode")
    Header.compute_pgm_resource_registers |= S_00B848_FLOAT_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_priv")
    Header.compute_pgm_resource_registers |= S_00B848_PRIV(Value);
  else if (ID == "compute_pgm_rsrc1_dx10_clamp")
    Header.compute_pgm_resource_registers |= S_00B848_DX10_CLAMP(Value);
  else if (ID == "compute_pgm_rsrc1_debug_mode")
    Header.compute_pgm_resource_registers |= S_00B848_DEBUG_MODE(Value);
  else if (ID == "compute_pgm_rsrc1_ieee_mode")
    Header.compute_pgm_resource_registers |= S_00B848_IEEE_MODE(Value);
  else if (ID == "compute_pgm_rsrc2_scratch_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_SCRATCH_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_user_sgpr")
    Header.compute_pgm_resource_registers |= (S_00B84C_USER_SGPR(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_x_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_X_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_y_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Y_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tgid_z_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TGID_Z_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tg_size_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_TG_SIZE_EN(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_tidig_comp_cnt")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_TIDIG_COMP_CNT(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en_msb")
    Header.compute_pgm_resource_registers |=
        (S_00B84C_EXCP_EN_MSB(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_lds_size")
    Header.compute_pgm_resource_registers |= (S_00B84C_LDS_SIZE(Value) << 32);
  else if (ID == "compute_pgm_rsrc2_excp_en")
    Header.compute_pgm_resource_registers |= (S_00B84C_EXCP_EN(Value) << 32);
  else if (ID == "compute_pgm_resource_registers")
    Header.compute_pgm_resource_registers = Value;
  else if (ID == "enable_sgpr_private_segment_buffer")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER_SHIFT);
  else if (ID == "enable_sgpr_dispatch_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR_SHIFT);
  else if (ID == "enable_sgpr_queue_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR_SHIFT);
  else if (ID == "enable_sgpr_kernarg_segment_ptr")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR_SHIFT);
  else if (ID == "enable_sgpr_dispatch_id")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID_SHIFT);
  else if (ID == "enable_sgpr_flat_scratch_init")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT_SHIFT);
  else if (ID == "enable_sgpr_private_segment_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_x")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_X_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_y")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Y_SHIFT);
  else if (ID == "enable_sgpr_grid_workgroup_count_z")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_SGPR_GRID_WORKGROUP_COUNT_Z_SHIFT);
  else if (ID == "enable_ordered_append_gds")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_ENABLE_ORDERED_APPEND_GDS_SHIFT);
  else if (ID == "private_element_size")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_PRIVATE_ELEMENT_SIZE_SHIFT);
  else if (ID == "is_ptr64")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_PTR64_SHIFT);
  else if (ID == "is_dynamic_callstack")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DYNAMIC_CALLSTACK_SHIFT);
  else if (ID == "is_debug_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_DEBUG_SUPPORTED_SHIFT);
  else if (ID == "is_xnack_enabled")
    Header.code_properties |=
        (Value << AMD_CODE_PROPERTY_IS_XNACK_SUPPORTED_SHIFT);
  else if (ID == "workitem_private_segment_byte_size")
    Header.workitem_private_segment_byte_size = Value;
  else if (ID == "workgroup_group_segment_byte_size")
    Header.workgroup_group_segment_byte_size = Value;
  else if (ID == "gds_segment_byte_size")
    Header.gds_segment_byte_size = Value;
  else if (ID == "kernarg_segment_byte_size")
    Header.kernarg_segment_byte_size = Value;
  else if (ID == "workgroup_fbarrier_count")
    Header.workgroup_fbarrier_count = Value;
  else if (ID == "wavefront_sgpr_count")
    Header.wavefront_sgpr_count = Value;
  else if (ID == "workitem_vgpr_count")
    Header.workitem_vgpr_count = Value;
  else if (ID == "reserved_vgpr_first")
    Header.reserved_vgpr_first = Value;
  else if (ID == "reserved_vgpr_count")
    Header.reserved_vgpr_count = Value;
  else if (ID == "reserved_sgpr_first")
    Header.reserved_sgpr_first = Value;
  else if (ID == "reserved_sgpr_count")
    Header.reserved_sgpr_count = Value;
  else if (ID == "debug_wavefront_private_segment_offset_sgpr")
    Header.debug_wavefront_private_segment_offset_sgpr = Value;
  else if (ID == "debug_private_segment_buffer_sgpr")
    Header.debug_private_segment_buffer_sgpr = Value;
  else if (ID == "kernarg_segment_alignment")
    Header.kernarg_segment_alignment = Value;
  else if (ID == "group_segment_alignment")
    Header.group_segment_alignment = Value;
  else if (ID == "private_segment_alignment")
    Header.private_segment_alignment = Value;
  else if (ID == "wavefront_size")
    Header.wavefront_size = Value;
  else if (ID == "call_convention")
    Header.call_convention = Value;
  else if (ID == "runtime_loader_kernel_symbol")
    Header.runtime_loader_kernel_symbol = Value;
  else
    return TokError("amd_kernel_code_t value not recognized.");

  return false;
}

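// Parses a block of the form (illustrative; keys are the identifiers handled
// in ParseAMDKernelCodeTValue above, values are placeholders):
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//     wavefront_sgpr_count = 10
//   .end_amd_kernel_code_t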
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (!isVI())
    return true;

  // VI only has 102 SGPRs, so make sure we aren't trying to use the two
  // additional SGPRs (SGPR102/SGPR103) that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand, or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false;
  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure of the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      bool HasModifiers = operandsHaveModifiers(Operands);
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }

      if (Modifiers && !HasModifiers) {
1088 // don't have modifiers, so we need to go back and empty modifers
1089 // for each previous source.
1090 for (unsigned PrevRegIdx = Operands.size() - 1; PrevRegIdx > 1;
1091 --PrevRegIdx) {
1092
1093 AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[PrevRegIdx]);
1094 RegOp.setModifiers(0);
1095 }
1096 }
1097
1098
1099 Operands.push_back(AMDGPUOperand::CreateReg(
1100 RegNo, S, E, getContext().getRegisterInfo(),
1101 isForcedVOP3()));
1102
1103 if (HasModifiers || Modifiers) {
1104 AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
1105 RegOp.setModifiers(Modifiers);
1106
1107 }
1108 } else {
1109 Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
1110 S));
1111 Parser.Lex();
1112 }
1113 return MatchOperand_Success;
1114 }
1115 default:
1116 return MatchOperand_NoMatch;
1117 }
1118}
1119
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  // Once we reach end of statement, continue parsing so we can add default
  // values for optional arguments.
  AMDGPUAsmParser::OperandMatchResultTy Res;
  while ((Res = parseOperand(Operands, Name)) != MatchOperand_NoMatch) {
    if (Res != MatchOperand_Success)
      return Error(getLexer().getLoc(), "failed parsing operand.");
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

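// Parses an integer operand written as "<Prefix>:<value>", e.g. (illustrative)
// "offset:16"; if the statement has already ended, Int is set to Default.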
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

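// Parses a named single-bit flag: the bare name sets the bit and the
// "no"-prefixed form clears it, e.g. (illustrative) "glc" -> 1, "noglc" -> 0.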
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // If we are at the end of the statement, this is a default argument and the
  // bit keeps its default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}

static bool operandsHasOptionalOp(const OperandVector &Operands,
                                  const OptionalOperand &OOp) {
  for (unsigned i = 0; i < Operands.size(); i++) {
    const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
    if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
        (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}

//===----------------------------------------------------------------------===//
// ds
//===----------------------------------------------------------------------===//

static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};
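
// The tables above describe the optional suffixes of DS instructions, e.g.
// (illustrative only):
//   ds_write_b32  v1, v2      offset:16 gds
//   ds_write2_b32 v1, v2, v3  offset0:4 offset1:8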

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  AMDGPUAsmParser::OperandMatchResultTy Res =
      parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
  if (Res == MatchOperand_NoMatch) {
    Operands.push_back(AMDGPUOperand::CreateImm(0, S,
                                                AMDGPUOperand::ImmTyOffset));
    Res = MatchOperand_Success;
  }
  return Res;
}

bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}

bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}

void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned Offset0Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset0];
  unsigned Offset1Idx = OptionalIdx[AMDGPUOperand::ImmTyDSOffset1];
  unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];

  ((AMDGPUOperand &)*Operands[Offset0Idx]).addImmOperands(Inst, 1); // offset0
  ((AMDGPUOperand &)*Operands[Offset1Idx]).addImmOperands(Inst, 1); // offset1
  ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1); // offset

  if (!GDSOnly) {
    unsigned GDSIdx = OptionalIdx[AMDGPUOperand::ImmTyGDS];
    ((AMDGPUOperand &)*Operands[GDSIdx]).addImmOperands(Inst, 1); // gds
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

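// Parses one counter specification of the form "<name>(<count>)", where name
// is vmcnt, expcnt or lgkmcnt; specs may be separated by '&' or ','. An
// illustrative full operand: "vmcnt(0) & lgkmcnt(0)".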
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0x7;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt   [3:0]
  // expcnt  [6:4]
  // lgkmcnt [10:8]
  int64_t CntVal = 0x77f;
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while (getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

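// A branch target is either an absolute integer offset or a symbol reference,
// e.g. (illustrative): "s_branch 8" or "s_branch loop_begin".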
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  std::map<AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token which is sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken())
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // flat atomic instructions don't have a glc argument.
  if (OptionalIdx.count(AMDGPUOperand::ImmTyGLC)) {
    unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
    ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  }

  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
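
// These suffixes trail the register operands of a MUBUF instruction, e.g.
// (illustrative only):
//   buffer_load_dword v1, s[4:7], s1 offset:16 glc slc tfe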

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

bool AMDGPUOperand::isMubufOffset() const {
  return isImm() && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  assert(OptionalIdx.size() == 4);

  unsigned OffsetIdx = OptionalIdx[AMDGPUOperand::ImmTyOffset];
  unsigned GLCIdx = OptionalIdx[AMDGPUOperand::ImmTyGLC];
  unsigned SLCIdx = OptionalIdx[AMDGPUOperand::ImmTySLC];
  unsigned TFEIdx = OptionalIdx[AMDGPUOperand::ImmTyTFE];

  ((AMDGPUOperand &)*Operands[OffsetIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[GLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[SLCIdx]).addImmOperands(Inst, 1);
  ((AMDGPUOperand &)*Operands[TFEIdx]).addImmOperands(Inst, 1);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

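// Convert the output-modifier (omod) value from its asm form to its encoded
// form: "mul:1"/"div:1" encode as 0, "mul:2" as 1, "mul:4" as 2 and "div:2"
// as 3; anything else is rejected.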
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

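// Heuristic for whether an instruction must use the 64-bit VOP3 encoding:
// any operand carries modifiers, the destination is a 64-bit SGPR pair
// (e.g. a carry-out), the operand list has five or more entries (mnemonic,
// dst and at least three sources), or src1 is an SGPR (in the 32-bit
// encodings only src0 may be an SGPR).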
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

  if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
    return true;

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.getReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                            Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if (Op.isReg())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  unsigned i = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  if (Desc.getNumDefs() > 0) {
    ((AMDGPUOperand &)*Operands[i++]).addRegOperands(Inst, 1);
  }

  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;

  if (operandsHaveModifiers(Operands)) {
    for (unsigned e = Operands.size(); i != e; ++i) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

      if (Op.isRegWithInputMods()) {
        ((AMDGPUOperand &)*Operands[i]).addRegWithInputModsOperands(Inst, 2);
        continue;
      }
      OptionalIdx[Op.getImmTy()] = i;
    }

    unsigned ClampIdx = OptionalIdx[AMDGPUOperand::ImmTyClamp];
    unsigned OModIdx = OptionalIdx[AMDGPUOperand::ImmTyOMod];

    ((AMDGPUOperand &)*Operands[ClampIdx]).addImmOperands(Inst, 1);
    ((AMDGPUOperand &)*Operands[OModIdx]).addImmOperands(Inst, 1);
  } else {
    for (unsigned e = Operands.size(); i != e; ++i)
      ((AMDGPUOperand &)*Operands[i]).addRegOrImmOperands(Inst, 1);
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"