//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbolELF.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

struct OptionalOperand;

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;

public:
  AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}

  MCContext *Ctx;

  enum ImmTy {
    ImmTyNone,
    ImmTyDSOffset0,
    ImmTyDSOffset1,
    ImmTyGDS,
    ImmTyOffset,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyClamp,
    ImmTyOMod,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    bool IsFPImm;
    ImmTy Type;
    int64_t Val;
    int Modifiers;
  };

  struct RegOp {
    unsigned RegNo;
    int Modifiers;
    const MCRegisterInfo *TRI;
    const MCSubtargetInfo *STI;
    bool IsForcedVOP3;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  void addImmOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createImm(getImm()));
  }

  StringRef getToken() const {
    return StringRef(Tok.Data, Tok.Length);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
  }

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind()) {
      Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
      addRegOperands(Inst, N);
    } else {
      Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
      addImmOperands(Inst, N);
    }
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  bool defaultTokenHasSuffix() const {
    StringRef Token(Tok.Data, Tok.Length);

    return Token.endswith("_e32") || Token.endswith("_e64") ||
           Token.endswith("_dpp");
  }

  bool isToken() const override {
    return Kind == Token;
  }

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm() const {
    // Only plain immediates are inlinable (e.g. the "clamp" attribute is not).
    if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone)
      return false;
    // TODO: We should avoid using host float here. It would be better to
    // check the float bit values, which is what a few other places do.
    // We've had bot failures before due to weird NaN support on mips hosts.
    const float F = BitsToFloat(Imm.Val);
    // TODO: Add 1/(2*pi) for VI
    return (Imm.Val <= 64 && Imm.Val >= -16) ||
           (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
            F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
  }
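
  // Illustrative examples (not from the original source): under the rule
  // above, "v_add_f32 v0, 64, v1" can encode 64 as an inline constant,
  // while "v_add_f32 v0, 65, v1" needs a 32-bit literal, and a "clamp"
  // immediate is never considered inlinable regardless of its value.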

  bool isDSOffset0() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset0;
  }

  bool isDSOffset1() const {
    assert(isImm());
    return Imm.Type == ImmTyDSOffset1;
  }

  int64_t getImm() const {
    return Imm.Val;
  }

  enum ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return Kind == Register && Reg.Modifiers == 0;
  }

  bool isRegOrImmWithInputMods() const {
    return Kind == Register || isInlinableImm();
  }

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isClamp() const {
    return isImmTy(ImmTyClamp);
  }

  bool isOMod() const {
    return isImmTy(ImmTyOMod);
  }

  bool isImmModifier() const {
    return Kind == Immediate && Imm.Type != ImmTyNone;
  }

  bool isDMask() const {
    return isImmTy(ImmTyDMask);
  }

  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }

  bool isMod() const {
    return isClamp() || isOMod();
  }

  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }

  bool isBankMask() const {
    return isImmTy(ImmTyDppBankMask);
  }

  bool isRowMask() const {
    return isImmTy(ImmTyDppRowMask);
  }

  bool isBoundCtrl() const {
    return isImmTy(ImmTyDppBoundCtrl);
  }

  void setModifiers(unsigned Mods) {
    assert(isReg() || (isImm() && Imm.Modifiers == 0));
    if (isReg())
      Reg.Modifiers = Mods;
    else
      Imm.Modifiers = Mods;
  }

  bool hasModifiers() const {
    assert(isRegKind() || isImm());
    return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const {
    return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
  }

  bool isSCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_32RegClassID));
  }

  bool isSCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::SReg_64RegClassID));
  }

  bool isSSrc32() const {
    return isImm() || isSCSrc32();
  }

  bool isSSrc64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isImm() || isSCSrc64();
  }

  bool isVCSrc32() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_32RegClassID));
  }

  bool isVCSrc64() const {
    return isInlinableImm() || (isReg() && isRegClass(AMDGPU::VS_64RegClassID));
  }

  bool isVSrc32() const {
    return isImm() || isVCSrc32();
  }

  bool isVSrc64() const {
    // TODO: Check if the 64-bit value (coming from assembly source) can be
    // narrowed to 32 bits (in the instruction stream). That requires knowledge
    // of instruction type (unsigned/signed, floating or "untyped"/B64),
    // see [AMD GCN3 ISA 6.3.1].
    // TODO: How are 64-bit values formed from 32-bit literals in _B64 insns?
    return isImm() || isVCSrc64();
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
      break;
    case Immediate:
      if (Imm.Type != AMDGPUOperand::ImmTyNone)
        OS << getImm();
      else
        OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
                                                  enum ImmTy Type = ImmTyNone,
                                                  bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Modifiers = 0;
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
                                                    bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
                                                  SMLoc E,
                                                  const MCRegisterInfo *TRI,
                                                  const MCSubtargetInfo *STI,
                                                  bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register);
    Op->Reg.RegNo = RegNo;
    Op->Reg.TRI = TRI;
    Op->Reg.STI = STI;
    Op->Reg.Modifiers = 0;
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  bool isDSOffset() const;
  bool isDSOffset01() const;
  bool isSWaitCnt() const;
  bool isMubufOffset() const;
  bool isSMRDOffset() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
};

class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands);
  OperandMatchResultTy parseDPPOptionalOps(OperandVector &Operands);
  void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  int64_t Default;
  bool (*ConvertResult)(int64_t&);
};

}

static int getRegClass(bool IsVgpr, unsigned RegWidth) {
  if (IsVgpr) {
    switch (RegWidth) {
    default: return -1;
    case 1: return AMDGPU::VGPR_32RegClassID;
    case 2: return AMDGPU::VReg_64RegClassID;
    case 3: return AMDGPU::VReg_96RegClassID;
    case 4: return AMDGPU::VReg_128RegClassID;
    case 8: return AMDGPU::VReg_256RegClassID;
    case 16: return AMDGPU::VReg_512RegClassID;
    }
  }

  switch (RegWidth) {
  default: return -1;
  case 1: return AMDGPU::SGPR_32RegClassID;
  case 2: return AMDGPU::SGPR_64RegClassID;
  case 4: return AMDGPU::SReg_128RegClassID;
  case 8: return AMDGPU::SReg_256RegClassID;
  case 16: return AMDGPU::SReg_512RegClassID;
  }
}
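
// Illustrative mapping (follows directly from the switch above): "v[0:2]"
// has width 3 and maps to VReg_96RegClassID; "s[8:11]" has width 4 and maps
// to SReg_128RegClassID; a width with no matching class (e.g. an SGPR range
// of width 3) returns -1 and the register fails to parse.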
598
Craig Topper4e9b03d62015-09-21 00:18:00 +0000599static unsigned getRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000600
601 return StringSwitch<unsigned>(RegName)
602 .Case("exec", AMDGPU::EXEC)
603 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000604 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000605 .Case("m0", AMDGPU::M0)
606 .Case("scc", AMDGPU::SCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000607 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
608 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000609 .Case("vcc_lo", AMDGPU::VCC_LO)
610 .Case("vcc_hi", AMDGPU::VCC_HI)
611 .Case("exec_lo", AMDGPU::EXEC_LO)
612 .Case("exec_hi", AMDGPU::EXEC_HI)
613 .Default(0);
614}
615
616bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
617 const AsmToken Tok = Parser.getTok();
618 StartLoc = Tok.getLoc();
619 EndLoc = Tok.getEndLoc();
Matt Arsenault3b159672015-12-01 20:31:08 +0000620 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
621
Matt Arsenault57116cc2015-09-10 21:51:15 +0000622 StringRef RegName = Tok.getString();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000623 RegNo = getRegForName(RegName);
624
625 if (RegNo) {
626 Parser.Lex();
Matt Arsenault3b159672015-12-01 20:31:08 +0000627 return !subtargetHasRegister(*TRI, RegNo);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000628 }
629
630 // Match vgprs and sgprs
631 if (RegName[0] != 's' && RegName[0] != 'v')
632 return true;
633
634 bool IsVgpr = RegName[0] == 'v';
635 unsigned RegWidth;
636 unsigned RegIndexInClass;
637 if (RegName.size() > 1) {
638 // We have a 32-bit register
639 RegWidth = 1;
640 if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
641 return true;
642 Parser.Lex();
643 } else {
644 // We have a register greater than 32-bits.
645
646 int64_t RegLo, RegHi;
647 Parser.Lex();
648 if (getLexer().isNot(AsmToken::LBrac))
649 return true;
650
651 Parser.Lex();
652 if (getParser().parseAbsoluteExpression(RegLo))
653 return true;
654
655 if (getLexer().isNot(AsmToken::Colon))
656 return true;
657
658 Parser.Lex();
659 if (getParser().parseAbsoluteExpression(RegHi))
660 return true;
661
662 if (getLexer().isNot(AsmToken::RBrac))
663 return true;
664
665 Parser.Lex();
666 RegWidth = (RegHi - RegLo) + 1;
667 if (IsVgpr) {
668 // VGPR registers aren't aligned.
669 RegIndexInClass = RegLo;
670 } else {
671 // SGPR registers are aligned. Max alignment is 4 dwords.
Matt Arsenault967c2f52015-11-03 22:50:32 +0000672 unsigned Size = std::min(RegWidth, 4u);
673 if (RegLo % Size != 0)
674 return true;
675
676 RegIndexInClass = RegLo / Size;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000677 }
678 }
679
Matt Arsenault967c2f52015-11-03 22:50:32 +0000680 int RCID = getRegClass(IsVgpr, RegWidth);
681 if (RCID == -1)
682 return true;
683
684 const MCRegisterClass RC = TRI->getRegClass(RCID);
Matt Arsenault3473c722015-11-03 22:50:27 +0000685 if (RegIndexInClass >= RC.getNumRegs())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000686 return true;
Matt Arsenault3473c722015-11-03 22:50:27 +0000687
688 RegNo = RC.getRegister(RegIndexInClass);
Matt Arsenault68802d32015-11-05 03:11:27 +0000689 return !subtargetHasRegister(*TRI, RegNo);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000690}
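
// Illustrative note (assumed examples): this accepts single registers such
// as "v0" or "s7" and bracketed ranges such as "v[4:7]" or "s[0:1]". A range
// like "s[2:5]" is rejected because four-dword SGPR groups must start at a
// multiple of four, while "v[2:5]" is accepted since VGPR ranges carry no
// alignment requirement.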

unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
    return Match_InvalidOperand;

  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  return Match_Success;
}

bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}

bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid major version");

  Major = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid minor version");

  Minor = getLexer().getTok().getIntVal();
  Lex();

  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
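
// Illustrative usage (assumed example directives, mirroring the parse
// sequence above):
//   .hsa_code_object_version 1,0
//   .hsa_code_object_isa 7,0,0,"AMD","AMDGPU"
// With no arguments, .hsa_code_object_isa falls back to the ISA version of
// the targeted GPU.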

bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
                                               amd_kernel_code_t &Header) {
  SmallString<40> ErrStr;
  raw_svector_ostream Err(ErrStr);
  if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
    return TokError(Err.str());
  }
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while (getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
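
// Illustrative usage (field name assumed; the accepted keys come from
// parseAmdKernelCodeField in AMDKernelCodeTUtils):
//   .amd_kernel_code_t
//     kernel_code_version_major = 1
//   .end_amd_kernel_code_t
// Each "name = value" line overrides one field of the default-initialized
// amd_kernel_code_t header.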

bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef KernelName = Parser.getTok().getString();

  getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
                                           ELF::STT_AMDGPU_HSA_KERNEL);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
  if (getLexer().isNot(AsmToken::Identifier))
    return TokError("expected symbol name");

  StringRef GlobalName = Parser.getTok().getIdentifier();

  getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
  Lex();
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}

bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  return true;
}

bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
                                           unsigned RegNo) const {
  if (isCI())
    return true;

  if (isSI()) {
    // No flat_scr
    switch (RegNo) {
    case AMDGPU::FLAT_SCR:
    case AMDGPU::FLAT_SCR_LO:
    case AMDGPU::FLAT_SCR_HI:
      return false;
    default:
      return true;
    }
  }

  // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more
  // that SI/CI have.
  for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
       R.isValid(); ++R) {
    if (*R == RegNo)
      return false;
  }

  return true;
}

static bool operandsHaveModifiers(const OperandVector &Operands) {
  for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
    const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
    if (Op.isRegKind() && Op.hasModifiers())
      return true;
    if (Op.isImm() && Op.hasModifiers())
      return true;
    if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
                       Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there was an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parsers, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind() == AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch (getLexer().getKind()) {
  case AsmToken::Integer: {
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;
    if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
      Error(S, "invalid immediate: only 32-bit values are legal");
      return MatchOperand_ParseFail;
    }

    if (Negate)
      IntVal *= -1;
    Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
    return MatchOperand_Success;
  }
  case AsmToken::Real: {
    // FIXME: We should emit an error if a double precision floating-point
    // value is used. I'm not sure the best way to detect this.
    SMLoc S = Parser.getTok().getLoc();
    int64_t IntVal;
    if (getParser().parseAbsoluteExpression(IntVal))
      return MatchOperand_ParseFail;

    APFloat F((float)BitsToDouble(IntVal));
    if (Negate)
      F.changeSign();
    Operands.push_back(
        AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
    return MatchOperand_Success;
  }
  case AsmToken::Identifier: {
    SMLoc S, E;
    unsigned RegNo;
    if (!ParseRegister(RegNo, S, E)) {
      unsigned Modifiers = 0;

      if (Negate)
        Modifiers |= 0x1;

      if (Abs) {
        if (getLexer().getKind() != AsmToken::Pipe)
          return MatchOperand_ParseFail;
        Parser.Lex();
        Modifiers |= 0x2;
      }
      if (Abs2) {
        if (getLexer().isNot(AsmToken::RParen)) {
          return MatchOperand_ParseFail;
        }
        Parser.Lex();
        Modifiers |= 0x2;
      }
      Operands.push_back(AMDGPUOperand::CreateReg(
          RegNo, S, E, getContext().getRegisterInfo(), &getSTI(),
          isForcedVOP3()));

      if (Modifiers) {
        AMDGPUOperand &RegOp = ((AMDGPUOperand&)*Operands[Operands.size() - 1]);
        RegOp.setModifiers(Modifiers);
      }
    } else {
      ResTy = parseVOP3OptionalOps(Operands);
      if (ResTy == MatchOperand_NoMatch) {
        Operands.push_back(AMDGPUOperand::CreateToken(Parser.getTok().getString(),
                                                      S));
        Parser.Lex();
      }
    }
    return MatchOperand_Success;
  }
  default:
    return MatchOperand_NoMatch;
  }
}
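
// Illustrative note (assumed examples): the modifier parsing above accepts
// operands such as "-v0" (neg, modifier bit 0x1), "|v1|" or "abs(v2)" (abs,
// modifier bit 0x2), and "-|v3|" (both bits set).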

bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      return Error(getLexer().getLoc(), "failed parsing operand.");
    case MatchOperand_NoMatch:
      return Error(getLexer().getLoc(), "not a valid operand.");
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Utility functions
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    StringRef OffsetName = Parser.getTok().getString();
    if (!OffsetName.equals(Prefix))
      return MatchOperand_NoMatch;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                                    enum AMDGPUOperand::ImmTy ImmTy) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Offset = 0;

  AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
  if (Res != MatchOperand_Success)
    return Res;

  Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
  return MatchOperand_Success;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch (getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
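
// Illustrative note (examples follow directly from the parsers above):
// parseIntWithPrefix matches "offset:16" or "offset0:8" style operands,
// while parseNamedBit matches bare flags, so "glc" yields 1 and "noglc"
// yields 0 for the corresponding immediate operand.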
1249
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001250typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1251
Sam Koltondfa29f72016-03-09 12:29:31 +00001252void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1253 OptionalImmIndexMap& OptionalIdx,
1254 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001255 auto i = OptionalIdx.find(ImmT);
1256 if (i != OptionalIdx.end()) {
1257 unsigned Idx = i->second;
1258 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1259 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001260 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001261 }
1262}
1263
Tom Stellard45bb48e2015-06-13 03:28:10 +00001264static bool operandsHasOptionalOp(const OperandVector &Operands,
1265 const OptionalOperand &OOp) {
1266 for (unsigned i = 0; i < Operands.size(); i++) {
1267 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1268 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1269 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1270 return true;
1271
1272 }
1273 return false;
1274}
1275
1276AMDGPUAsmParser::OperandMatchResultTy
1277AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
1278 OperandVector &Operands) {
1279 SMLoc S = Parser.getTok().getLoc();
1280 for (const OptionalOperand &Op : OptionalOps) {
1281 if (operandsHasOptionalOp(Operands, Op))
1282 continue;
1283 AMDGPUAsmParser::OperandMatchResultTy Res;
1284 int64_t Value;
1285 if (Op.IsBit) {
1286 Res = parseNamedBit(Op.Name, Operands, Op.Type);
1287 if (Res == MatchOperand_NoMatch)
1288 continue;
1289 return Res;
1290 }
1291
1292 Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
1293
1294 if (Res == MatchOperand_NoMatch)
1295 continue;
1296
1297 if (Res != MatchOperand_Success)
1298 return Res;
1299
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001300 bool DefaultValue = (Value == Op.Default);
1301
Tom Stellard45bb48e2015-06-13 03:28:10 +00001302 if (Op.ConvertResult && !Op.ConvertResult(Value)) {
1303 return MatchOperand_ParseFail;
1304 }
1305
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001306 if (!DefaultValue) {
1307 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
1308 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001309 return MatchOperand_Success;
1310 }
1311 return MatchOperand_NoMatch;
1312}
1313
1314//===----------------------------------------------------------------------===//
1315// ds
1316//===----------------------------------------------------------------------===//
1317
1318static const OptionalOperand DSOptionalOps [] = {
1319 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
1320 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
1321};
1322
1323static const OptionalOperand DSOptionalOpsOff01 [] = {
1324 {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
1325 {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
1326 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
1327};
1328
1329AMDGPUAsmParser::OperandMatchResultTy
1330AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
1331 return parseOptionalOps(DSOptionalOps, Operands);
1332}
1333AMDGPUAsmParser::OperandMatchResultTy
1334AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
1335 return parseOptionalOps(DSOptionalOpsOff01, Operands);
1336}
1337
1338AMDGPUAsmParser::OperandMatchResultTy
1339AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
1340 SMLoc S = Parser.getTok().getLoc();
1341 AMDGPUAsmParser::OperandMatchResultTy Res =
1342 parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
1343 if (Res == MatchOperand_NoMatch) {
1344 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
1345 AMDGPUOperand::ImmTyOffset));
1346 Res = MatchOperand_Success;
1347 }
1348 return Res;
1349}
1350
1351bool AMDGPUOperand::isDSOffset() const {
1352 return isImm() && isUInt<16>(getImm());
1353}
1354
1355bool AMDGPUOperand::isDSOffset01() const {
1356 return isImm() && isUInt<8>(getImm());
1357}
1358
1359void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
1360 const OperandVector &Operands) {
1361
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001362 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001363
1364 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1365 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1366
1367 // Add the register arguments
1368 if (Op.isReg()) {
1369 Op.addRegOperands(Inst, 1);
1370 continue;
1371 }
1372
1373 // Handle optional arguments
1374 OptionalIdx[Op.getImmTy()] = i;
1375 }
1376
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001377 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
1378 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
1379 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001380
Tom Stellard45bb48e2015-06-13 03:28:10 +00001381 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1382}
1383
1384void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1385
1386 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1387 bool GDSOnly = false;
1388
1389 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1390 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1391
1392 // Add the register arguments
1393 if (Op.isReg()) {
1394 Op.addRegOperands(Inst, 1);
1395 continue;
1396 }
1397
1398 if (Op.isToken() && Op.getToken() == "gds") {
1399 GDSOnly = true;
1400 continue;
1401 }
1402
1403 // Handle optional arguments
1404 OptionalIdx[Op.getImmTy()] = i;
1405 }
1406
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001407 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1408 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001409
1410 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001411 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001412 }
1413 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1414}
1415
1416
1417//===----------------------------------------------------------------------===//
1418// s_waitcnt
1419//===----------------------------------------------------------------------===//
1420
1421bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
1422 StringRef CntName = Parser.getTok().getString();
1423 int64_t CntVal;
1424
1425 Parser.Lex();
1426 if (getLexer().isNot(AsmToken::LParen))
1427 return true;
1428
1429 Parser.Lex();
1430 if (getLexer().isNot(AsmToken::Integer))
1431 return true;
1432
1433 if (getParser().parseAbsoluteExpression(CntVal))
1434 return true;
1435
1436 if (getLexer().isNot(AsmToken::RParen))
1437 return true;
1438
1439 Parser.Lex();
1440 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
1441 Parser.Lex();
1442
1443 int CntShift;
1444 int CntMask;
1445
1446 if (CntName == "vmcnt") {
1447 CntMask = 0xf;
1448 CntShift = 0;
1449 } else if (CntName == "expcnt") {
1450 CntMask = 0x7;
1451 CntShift = 4;
1452 } else if (CntName == "lgkmcnt") {
Tom Stellard3d2c8522016-01-28 17:13:44 +00001453 CntMask = 0xf;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001454 CntShift = 8;
1455 } else {
1456 return true;
1457 }
1458
1459 IntVal &= ~(CntMask << CntShift);
1460 IntVal |= (CntVal << CntShift);
1461 return false;
1462}
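
// Worked example (illustrative): "s_waitcnt vmcnt(0) & lgkmcnt(0)" starts
// from the all-counters-disabled value 0xf7f, clears vmcnt bits [3:0] to
// give 0xf70, then clears lgkmcnt bits [11:8] to give 0x070; expcnt keeps
// its "don't wait" value of 7.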
1463
1464AMDGPUAsmParser::OperandMatchResultTy
1465AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
1466 // Disable all counters by default.
1467 // vmcnt [3:0]
1468 // expcnt [6:4]
Tom Stellard3d2c8522016-01-28 17:13:44 +00001469 // lgkmcnt [11:8]
1470 int64_t CntVal = 0xf7f;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001471 SMLoc S = Parser.getTok().getLoc();
1472
1473 switch(getLexer().getKind()) {
1474 default: return MatchOperand_ParseFail;
1475 case AsmToken::Integer:
1476 // The operand can be an integer value.
1477 if (getParser().parseAbsoluteExpression(CntVal))
1478 return MatchOperand_ParseFail;
1479 break;
1480
1481 case AsmToken::Identifier:
1482 do {
1483 if (parseCnt(CntVal))
1484 return MatchOperand_ParseFail;
1485 } while(getLexer().isNot(AsmToken::EndOfStatement));
1486 break;
1487 }
1488 Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
1489 return MatchOperand_Success;
1490}
1491
1492bool AMDGPUOperand::isSWaitCnt() const {
1493 return isImm();
1494}
1495
1496//===----------------------------------------------------------------------===//
1497// sopp branch targets
1498//===----------------------------------------------------------------------===//
1499
1500AMDGPUAsmParser::OperandMatchResultTy
1501AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
1502 SMLoc S = Parser.getTok().getLoc();
1503
1504 switch (getLexer().getKind()) {
1505 default: return MatchOperand_ParseFail;
1506 case AsmToken::Integer: {
1507 int64_t Imm;
1508 if (getParser().parseAbsoluteExpression(Imm))
1509 return MatchOperand_ParseFail;
1510 Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
1511 return MatchOperand_Success;
1512 }
1513
1514 case AsmToken::Identifier:
1515 Operands.push_back(AMDGPUOperand::CreateExpr(
1516 MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
1517 Parser.getTok().getString()), getContext()), S));
1518 Parser.Lex();
1519 return MatchOperand_Success;
1520 }
1521}
1522
1523//===----------------------------------------------------------------------===//
1524// flat
1525//===----------------------------------------------------------------------===//
1526
static const OptionalOperand FlatOptionalOps[] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps[] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

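// MCInst conversion happens in two passes: register and ordinary immediate
// operands are appended in parse order, while optional immediates are
// recorded by type and then appended in the fixed order the instruction
// encoding expects.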
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps[] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

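// MUBUF offsets are unsigned 12-bit immediates, so the largest accepted
// value is offset:4095.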
bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

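// SMRD offsets take one of two forms: a small immediate encoded directly in
// the instruction (8 bits here), or, on CI only, a 32-bit literal for
// offsets that do not fit in 8 bits.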
bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI, and we only want to use them
  // when the offset does not fit in 8 bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

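// VOP3 output modifiers (omod) are a 2-bit field: 0 = none, 1 = multiply by
// 2, 2 = multiply by 4, 3 = divide by 2. The converters below translate the
// values written in "mul:N" / "div:N" syntax into that encoding.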
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps[] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isReg() && DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isReg() && (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
                           Src1Op.isRegClass(AMDGPU::SReg_64RegClassID)))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by operandsHaveModifiers may change after parsing an
  // operand, so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operand, so we need to make sure all
      // previous register operands have modifiers too.
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

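// An operand with source modifiers contributes two MCInst operands: a
// modifier immediate (neg/abs bits) followed by the register or immediate
// itself; hence the "2" passed to addRegOrImmWithInputModsOperands below.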
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
}

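// MIMG: register operands are added first, then the optional immediates in
// the fixed order the instruction definition expects (dmask through slc).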
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst: the atomic's data source is tied to its
  // destination, so re-add the register operand that was just consumed as
  // the def. (Using Operands[I] here would instead duplicate the next
  // parsed operand.)
  ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

//===----------------------------------------------------------------------===//
// dpp
//===----------------------------------------------------------------------===//

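// dpp_ctrl immediate encodings, mirroring parseDPPCtrlOps below:
//   0x000-0x0ff  quad_perm
//   0x101-0x10f  row_shl 1..15
//   0x111-0x11f  row_shr 1..15
//   0x121-0x12f  row_ror 1..15
//   0x130/0x134/0x138/0x13c  wave_shl/wave_rol/wave_shr/wave_ror
//   0x140/0x141  row_mirror / row_half_mirror
//   0x142/0x143  row_bcast:15 / row_bcast:31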
bool AMDGPUOperand::isDPPCtrl() const {
  bool Result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
  if (Result) {
    int64_t Imm = getImm();
    return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
           ((Imm >= 0x101) && (Imm <= 0x10f)) ||
           ((Imm >= 0x111) && (Imm <= 0x11f)) ||
           ((Imm >= 0x121) && (Imm <= 0x12f)) ||
           (Imm == 0x130) ||
           (Imm == 0x134) ||
           (Imm == 0x138) ||
           (Imm == 0x13c) ||
           (Imm == 0x140) ||
           (Imm == 0x141) ||
           (Imm == 0x142) ||
           (Imm == 0x143);
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands) {
  // ToDo: use same syntax as sp3 for dpp_ctrl
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  switch (getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Identifier: {
    Prefix = Parser.getTok().getString();

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    if (getParser().parseAbsoluteExpression(Int))
      return MatchOperand_ParseFail;
    break;
  }
  }

  if (Prefix.equals("row_shl")) {
    Int |= 0x100;
  } else if (Prefix.equals("row_shr")) {
    Int |= 0x110;
  } else if (Prefix.equals("row_ror")) {
    Int |= 0x120;
  } else if (Prefix.equals("wave_shl")) {
    Int = 0x130;
  } else if (Prefix.equals("wave_rol")) {
    Int = 0x134;
  } else if (Prefix.equals("wave_shr")) {
    Int = 0x138;
  } else if (Prefix.equals("wave_ror")) {
    Int = 0x13C;
  } else if (Prefix.equals("row_mirror")) {
    Int = 0x140;
  } else if (Prefix.equals("row_half_mirror")) {
    Int = 0x141;
  } else if (Prefix.equals("row_bcast")) {
    if (Int == 15) {
      Int = 0x142;
    } else if (Int == 31) {
      Int = 0x143;
    }
  } else if (!Prefix.equals("quad_perm")) {
    return MatchOperand_NoMatch;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}

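// row_mask and bank_mask default to 0xf, i.e. all rows/banks enabled.
// bound_ctrl defaults to -1 as a "not present" marker; see below for how
// "bound_ctrl:0" is rewritten to the value 1.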
static const OptionalOperand DPPOptionalOps[] = {
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPOptionalOps(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  OperandMatchResultTy Res = parseOptionalOps(DPPOptionalOps, Operands);
  // XXX - sp3 uses the syntax "bound_ctrl:0" to indicate that the bound_ctrl
  // bit was set.
  if (Res == MatchOperand_Success) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands.back());
    // If the last operand was parsed as bound_ctrl, replace it with the
    // correct value (1).
    if (Op.isImmTy(AMDGPUOperand::ImmTyDppBoundCtrl)) {
      Operands.pop_back();
      Operands.push_back(
          AMDGPUOperand::CreateImm(1, S, AMDGPUOperand::ImmTyDppBoundCtrl));
      return MatchOperand_Success;
    }
  }
  return Res;
}

void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, true);
}

void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, false);
}

void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
                             bool HasMods) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (!HasMods && Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (HasMods && Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // ToDo: fix default values for row_mask and bank_mask
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"