blob: 75f886fa988f2347dba30bc81c1d4c07e5a6a6be [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000016#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "llvm/ADT/STLExtras.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000018#include "llvm/ADT/SmallString.h"
19#include "llvm/ADT/SmallVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/StringSwitch.h"
21#include "llvm/ADT/Twine.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
26#include "llvm/MC/MCParser/MCAsmLexer.h"
27#include "llvm/MC/MCParser/MCAsmParser.h"
28#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000029#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000033#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000034#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000035#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/Support/SourceMgr.h"
37#include "llvm/Support/TargetRegistry.h"
38#include "llvm/Support/raw_ostream.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000039
40using namespace llvm;
41
42namespace {
43
44struct OptionalOperand;
45
46class AMDGPUOperand : public MCParsedAsmOperand {
47 enum KindTy {
48 Token,
49 Immediate,
50 Register,
51 Expression
52 } Kind;
53
54 SMLoc StartLoc, EndLoc;
55
56public:
57 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
58
59 MCContext *Ctx;
60
61 enum ImmTy {
62 ImmTyNone,
63 ImmTyDSOffset0,
64 ImmTyDSOffset1,
65 ImmTyGDS,
66 ImmTyOffset,
67 ImmTyGLC,
68 ImmTySLC,
69 ImmTyTFE,
70 ImmTyClamp,
Nikolay Haustov2f684f12016-02-26 09:51:05 +000071 ImmTyOMod,
Sam Koltondfa29f72016-03-09 12:29:31 +000072 ImmTyDppCtrl,
73 ImmTyDppRowMask,
74 ImmTyDppBankMask,
75 ImmTyDppBoundCtrl,
Nikolay Haustov2f684f12016-02-26 09:51:05 +000076 ImmTyDMask,
77 ImmTyUNorm,
78 ImmTyDA,
79 ImmTyR128,
80 ImmTyLWE,
Tom Stellard45bb48e2015-06-13 03:28:10 +000081 };
82
83 struct TokOp {
84 const char *Data;
85 unsigned Length;
86 };
87
88 struct ImmOp {
89 bool IsFPImm;
90 ImmTy Type;
91 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +000092 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +000093 };
94
95 struct RegOp {
96 unsigned RegNo;
97 int Modifiers;
98 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +000099 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000100 bool IsForcedVOP3;
101 };
102
103 union {
104 TokOp Tok;
105 ImmOp Imm;
106 RegOp Reg;
107 const MCExpr *Expr;
108 };
109
110 void addImmOperands(MCInst &Inst, unsigned N) const {
111 Inst.addOperand(MCOperand::createImm(getImm()));
112 }
113
114 StringRef getToken() const {
115 return StringRef(Tok.Data, Tok.Length);
116 }
117
118 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000119 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000120 }
121
122 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000123 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000124 addRegOperands(Inst, N);
125 else
126 addImmOperands(Inst, N);
127 }
128
Tom Stellardd93a34f2016-02-22 19:17:56 +0000129 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
130 if (isRegKind()) {
131 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
132 addRegOperands(Inst, N);
133 } else {
134 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
135 addImmOperands(Inst, N);
136 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000137 }
138
139 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
140 if (isImm())
141 addImmOperands(Inst, N);
142 else {
143 assert(isExpr());
144 Inst.addOperand(MCOperand::createExpr(Expr));
145 }
146 }
147
148 bool defaultTokenHasSuffix() const {
149 StringRef Token(Tok.Data, Tok.Length);
150
Sam Koltona74cd522016-03-18 15:35:51 +0000151 return Token.endswith("_e32") || Token.endswith("_e64") ||
Sam Koltondfa29f72016-03-09 12:29:31 +0000152 Token.endswith("_dpp");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000153 }
154
155 bool isToken() const override {
156 return Kind == Token;
157 }
158
159 bool isImm() const override {
160 return Kind == Immediate;
161 }
162
Tom Stellardd93a34f2016-02-22 19:17:56 +0000163 bool isInlinableImm() const {
164 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
165 immediates are inlinable (e.g. "clamp" attribute is not) */ )
166 return false;
167 // TODO: We should avoid using host float here. It would be better to
Sam Koltona74cd522016-03-18 15:35:51 +0000168 // check the float bit values which is what a few other places do.
Tom Stellardd93a34f2016-02-22 19:17:56 +0000169 // We've had bot failures before due to weird NaN support on mips hosts.
170 const float F = BitsToFloat(Imm.Val);
171 // TODO: Add 1/(2*pi) for VI
172 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000173 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000174 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000175 }
176
177 bool isDSOffset0() const {
178 assert(isImm());
179 return Imm.Type == ImmTyDSOffset0;
180 }
181
182 bool isDSOffset1() const {
183 assert(isImm());
184 return Imm.Type == ImmTyDSOffset1;
185 }
186
187 int64_t getImm() const {
188 return Imm.Val;
189 }
190
191 enum ImmTy getImmTy() const {
192 assert(isImm());
193 return Imm.Type;
194 }
195
196 bool isRegKind() const {
197 return Kind == Register;
198 }
199
200 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000201 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000202 }
203
Tom Stellardd93a34f2016-02-22 19:17:56 +0000204 bool isRegOrImmWithInputMods() const {
205 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000206 }
207
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000208 bool isImmTy(ImmTy ImmT) const {
209 return isImm() && Imm.Type == ImmT;
210 }
211
Tom Stellarda90b9522016-02-11 03:28:15 +0000212 bool isClamp() const {
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000213 return isImmTy(ImmTyClamp);
Tom Stellarda90b9522016-02-11 03:28:15 +0000214 }
215
216 bool isOMod() const {
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000217 return isImmTy(ImmTyOMod);
Tom Stellarda90b9522016-02-11 03:28:15 +0000218 }
219
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000220 bool isImmModifier() const {
221 return Kind == Immediate && Imm.Type != ImmTyNone;
222 }
223
224 bool isDMask() const {
225 return isImmTy(ImmTyDMask);
226 }
227
228 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
229 bool isDA() const { return isImmTy(ImmTyDA); }
230 bool isR128() const { return isImmTy(ImmTyUNorm); }
231 bool isLWE() const { return isImmTy(ImmTyLWE); }
232
Tom Stellarda90b9522016-02-11 03:28:15 +0000233 bool isMod() const {
234 return isClamp() || isOMod();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000235 }
236
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000237 bool isGDS() const { return isImmTy(ImmTyGDS); }
238 bool isGLC() const { return isImmTy(ImmTyGLC); }
239 bool isSLC() const { return isImmTy(ImmTySLC); }
240 bool isTFE() const { return isImmTy(ImmTyTFE); }
241
Sam Koltondfa29f72016-03-09 12:29:31 +0000242 bool isBankMask() const {
243 return isImmTy(ImmTyDppBankMask);
244 }
245
246 bool isRowMask() const {
247 return isImmTy(ImmTyDppRowMask);
248 }
249
250 bool isBoundCtrl() const {
251 return isImmTy(ImmTyDppBoundCtrl);
252 }
Sam Koltona74cd522016-03-18 15:35:51 +0000253
Tom Stellard45bb48e2015-06-13 03:28:10 +0000254 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000255 assert(isReg() || (isImm() && Imm.Modifiers == 0));
256 if (isReg())
257 Reg.Modifiers = Mods;
258 else
259 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000260 }
261
262 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000263 assert(isRegKind() || isImm());
264 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000265 }
266
267 unsigned getReg() const override {
268 return Reg.RegNo;
269 }
270
271 bool isRegOrImm() const {
272 return isReg() || isImm();
273 }
274
275 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000276 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000277 }
278
279 bool isSCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000280 return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000281 }
282
Matt Arsenault86d336e2015-09-08 21:15:00 +0000283 bool isSCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000284 return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000285 }
286
287 bool isSSrc32() const {
288 return isImm() || isSCSrc32();
289 }
290
291 bool isSSrc64() const {
292 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
293 // See isVSrc64().
294 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000295 }
296
Tom Stellard45bb48e2015-06-13 03:28:10 +0000297 bool isVCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000298 return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000299 }
300
301 bool isVCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000302 return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000303 }
304
305 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000306 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000307 }
308
309 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000310 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000311 // narrowed to 32 bits (in the instruction stream). That require knowledge
312 // of instruction type (unsigned/signed, floating or "untyped"/B64),
313 // see [AMD GCN3 ISA 6.3.1].
314 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
315 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000316 }
317
318 bool isMem() const override {
319 return false;
320 }
321
322 bool isExpr() const {
323 return Kind == Expression;
324 }
325
326 bool isSoppBrTarget() const {
327 return isExpr() || isImm();
328 }
329
330 SMLoc getStartLoc() const override {
331 return StartLoc;
332 }
333
334 SMLoc getEndLoc() const override {
335 return EndLoc;
336 }
337
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000338 void print(raw_ostream &OS) const override {
339 switch (Kind) {
340 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000341 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000342 break;
343 case Immediate:
Tom Stellardd93a34f2016-02-22 19:17:56 +0000344 if (Imm.Type != AMDGPUOperand::ImmTyNone)
345 OS << getImm();
Sam Koltona74cd522016-03-18 15:35:51 +0000346 else
Tom Stellardd93a34f2016-02-22 19:17:56 +0000347 OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000348 break;
349 case Token:
350 OS << '\'' << getToken() << '\'';
351 break;
352 case Expression:
353 OS << "<expr " << *Expr << '>';
354 break;
355 }
356 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000357
358 static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
359 enum ImmTy Type = ImmTyNone,
360 bool IsFPImm = false) {
361 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
362 Op->Imm.Val = Val;
363 Op->Imm.IsFPImm = IsFPImm;
364 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000365 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000366 Op->StartLoc = Loc;
367 Op->EndLoc = Loc;
368 return Op;
369 }
370
371 static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
372 bool HasExplicitEncodingSize = true) {
373 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
374 Res->Tok.Data = Str.data();
375 Res->Tok.Length = Str.size();
376 Res->StartLoc = Loc;
377 Res->EndLoc = Loc;
378 return Res;
379 }
380
381 static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
382 SMLoc E,
383 const MCRegisterInfo *TRI,
Tom Stellard2b65ed32015-12-21 18:44:27 +0000384 const MCSubtargetInfo *STI,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000385 bool ForceVOP3) {
386 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
387 Op->Reg.RegNo = RegNo;
388 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000389 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000390 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000391 Op->Reg.IsForcedVOP3 = ForceVOP3;
392 Op->StartLoc = S;
393 Op->EndLoc = E;
394 return Op;
395 }
396
397 static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
398 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
399 Op->Expr = Expr;
400 Op->StartLoc = S;
401 Op->EndLoc = S;
402 return Op;
403 }
404
405 bool isDSOffset() const;
406 bool isDSOffset01() const;
407 bool isSWaitCnt() const;
408 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000409 bool isSMRDOffset() const;
410 bool isSMRDLiteralOffset() const;
Sam Koltondfa29f72016-03-09 12:29:31 +0000411 bool isDPPCtrl() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000412};
413
/// Target assembly parser for AMDGPU. Parses instructions via the generated
/// matcher plus the custom operand parsers declared below, and handles the
/// HSA/AMDGPU-specific assembler directives.
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // 0 = no forced encoding; 32/64 force the e32/e64 (VOP3) encoding when the
  // mnemonic carries an explicit suffix.
  unsigned ForcedEncodingSize;

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  // SGPR102/103 exist only on pre-VI subtargets.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  // Directive handlers; each returns true on error (MCAsmParser convention).
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();

public:
  // Target-specific match result codes, starting after the generic ones.
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  unsigned getForcedEncodingSize() const {
    return ForcedEncodingSize;
  }

  void setForcedEncodingSize(unsigned Size) {
    ForcedEncodingSize = Size;
  }

  bool isForcedVOP3() const {
    return ForcedEncodingSize == 64;
  }

  // Core MCTargetAsmParser hooks.
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;

  // Generic helpers for "prefix:value" and bare named-bit operands.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                          int64_t Default = 0);
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
                                          OperandVector &Operands,
                                          enum AMDGPUOperand::ImmTy ImmTy =
                                              AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
                                     enum AMDGPUOperand::ImmTy ImmTy =
                                         AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseOptionalOps(
      const ArrayRef<OptionalOperand> &OptionalOps,
      OperandVector &Operands);

  // DS (local data share) instructions.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);

  // s_waitcnt and branch targets.
  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // FLAT memory instructions.
  OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
  void cvtFlat(MCInst &Inst, const OperandVector &Operands);
  void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);

  // MUBUF instructions and cache-policy bits.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseOffset(OperandVector &Operands);
  OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
  OperandMatchResultTy parseGLC(OperandVector &Operands);
  OperandMatchResultTy parseSLC(OperandVector &Operands);
  OperandMatchResultTy parseTFE(OperandVector &Operands);

  // MIMG (image) instruction flags.
  OperandMatchResultTy parseDMask(OperandVector &Operands);
  OperandMatchResultTy parseUNorm(OperandVector &Operands);
  OperandMatchResultTy parseDA(OperandVector &Operands);
  OperandMatchResultTy parseR128(OperandVector &Operands);
  OperandMatchResultTy parseLWE(OperandVector &Operands);

  // VOP3 conversion helpers (with/without source modifiers).
  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
  OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);

  // DPP (data parallel primitives) controls.
  OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands);
  OperandMatchResultTy parseDPPOptionalOps(OperandVector &Operands);
  void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
  void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);
};
566
/// Table entry describing one optional instruction operand that may be
/// omitted in the assembly source (used by parseOptionalOps).
struct OptionalOperand {
  const char *Name;                 // Assembly spelling, e.g. "offset".
  AMDGPUOperand::ImmTy Type;        // Immediate role tagged onto the operand.
  bool IsBit;                       // True if this is a bare flag bit.
  int64_t Default;                  // Value used when the operand is absent.
  bool (*ConvertResult)(int64_t&);  // Optional value post-processing; presumably
                                    // may be null when no conversion is needed
                                    // — confirm against parseOptionalOps.
};
574
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000575}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000576
Matt Arsenault967c2f52015-11-03 22:50:32 +0000577static int getRegClass(bool IsVgpr, unsigned RegWidth) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000578 if (IsVgpr) {
579 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000580 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000581 case 1: return AMDGPU::VGPR_32RegClassID;
582 case 2: return AMDGPU::VReg_64RegClassID;
583 case 3: return AMDGPU::VReg_96RegClassID;
584 case 4: return AMDGPU::VReg_128RegClassID;
585 case 8: return AMDGPU::VReg_256RegClassID;
586 case 16: return AMDGPU::VReg_512RegClassID;
587 }
588 }
589
590 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000591 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000592 case 1: return AMDGPU::SGPR_32RegClassID;
593 case 2: return AMDGPU::SGPR_64RegClassID;
594 case 4: return AMDGPU::SReg_128RegClassID;
595 case 8: return AMDGPU::SReg_256RegClassID;
596 case 16: return AMDGPU::SReg_512RegClassID;
597 }
598}
599
Craig Topper4e9b03d62015-09-21 00:18:00 +0000600static unsigned getRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000601
602 return StringSwitch<unsigned>(RegName)
603 .Case("exec", AMDGPU::EXEC)
604 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000605 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000606 .Case("m0", AMDGPU::M0)
607 .Case("scc", AMDGPU::SCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000608 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
609 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000610 .Case("vcc_lo", AMDGPU::VCC_LO)
611 .Case("vcc_hi", AMDGPU::VCC_HI)
612 .Case("exec_lo", AMDGPU::EXEC_LO)
613 .Case("exec_hi", AMDGPU::EXEC_HI)
614 .Default(0);
615}
616
617bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000618 auto R = parseRegister();
619 if (!R) return true;
620 assert(R->isReg());
621 RegNo = R->getReg();
622 StartLoc = R->getStartLoc();
623 EndLoc = R->getEndLoc();
624 return false;
625}
626
/// Parse a register operand: either a named special register ("vcc", "m0",
/// ...), a single 32-bit vgpr/sgpr ("v5", "s2"), or a register range
/// ("s[2:3]", "v[4:7]"). Returns null on any syntax or subtarget-validity
/// error.
std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
  const AsmToken &Tok = Parser.getTok();
  SMLoc StartLoc = Tok.getLoc();
  SMLoc EndLoc = Tok.getEndLoc();
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();

  StringRef RegName = Tok.getString();
  unsigned RegNo = getRegForName(RegName);

  // Named special register: consume the token and validate for the subtarget.
  if (RegNo) {
    Parser.Lex();
    if (!subtargetHasRegister(*TRI, RegNo))
      return nullptr;
    return AMDGPUOperand::CreateReg(RegNo, StartLoc, EndLoc,
                                    TRI, &getSTI(), false);
  }

  // Match vgprs and sgprs
  if (RegName[0] != 's' && RegName[0] != 'v')
    return nullptr;

  bool IsVgpr = RegName[0] == 'v';
  unsigned RegWidth;
  unsigned RegIndexInClass;
  if (RegName.size() > 1) {
    // We have a 32-bit register: the index is part of the same token,
    // e.g. "v5".
    RegWidth = 1;
    if (RegName.substr(1).getAsInteger(10, RegIndexInClass))
      return nullptr;
    Parser.Lex();
  } else {
    // We have a register greater than 32-bits: a bare 's' or 'v' followed by
    // a bracketed inclusive range, e.g. "s[2:3]".

    int64_t RegLo, RegHi;
    Parser.Lex();
    if (getLexer().isNot(AsmToken::LBrac))
      return nullptr;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegLo))
      return nullptr;

    if (getLexer().isNot(AsmToken::Colon))
      return nullptr;

    Parser.Lex();
    if (getParser().parseAbsoluteExpression(RegHi))
      return nullptr;

    if (getLexer().isNot(AsmToken::RBrac))
      return nullptr;

    Parser.Lex();
    RegWidth = (RegHi - RegLo) + 1;
    if (IsVgpr) {
      // VGPR registers aren't aligned.
      RegIndexInClass = RegLo;
    } else {
      // SGPR registers are aligned. Max alignment is 4 dwords, so the low
      // register of the range must be a multiple of min(width, 4).
      unsigned Size = std::min(RegWidth, 4u);
      if (RegLo % Size != 0)
        return nullptr;

      RegIndexInClass = RegLo / Size;
    }
  }

  // Resolve the (bank, width) pair to a register class and bounds-check the
  // index within it.
  int RCID = getRegClass(IsVgpr, RegWidth);
  if (RCID == -1)
    return nullptr;

  const MCRegisterClass RC = TRI->getRegClass(RCID);
  if (RegIndexInClass >= RC.getNumRegs())
    return nullptr;

  RegNo = RC.getRegister(RegIndexInClass);
  if (!subtargetHasRegister(*TRI, RegNo))
    return nullptr;

  return AMDGPUOperand::CreateReg(RegNo, StartLoc, EndLoc,
                                  TRI, &getSTI(), false);
}
709
710unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
711
712 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
713
714 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
715 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
716 return Match_InvalidOperand;
717
Tom Stellard88e0b252015-10-06 15:57:53 +0000718 if ((TSFlags & SIInstrFlags::VOP3) &&
719 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
720 getForcedEncodingSize() != 64)
721 return Match_PreferE32;
722
Tom Stellard45bb48e2015-06-13 03:28:10 +0000723 return Match_Success;
724}
725
726
/// Run the generated matcher over the parsed operands and either emit the
/// resulting MCInst or report a diagnostic. Returns true on error.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;

  switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;
  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one (ErrorInfo is its index; ~0ULL means "unknown").
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }
  // See checkTargetMatchPredicate: a VOPAsmPrefer32Bit instruction reached
  // the VOP3 encoding without an explicit _e64 suffix.
  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                 "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
764
Tom Stellard347ac792015-06-26 21:15:07 +0000765bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
766 uint32_t &Minor) {
767 if (getLexer().isNot(AsmToken::Integer))
768 return TokError("invalid major version");
769
770 Major = getLexer().getTok().getIntVal();
771 Lex();
772
773 if (getLexer().isNot(AsmToken::Comma))
774 return TokError("minor version number required, comma expected");
775 Lex();
776
777 if (getLexer().isNot(AsmToken::Integer))
778 return TokError("invalid minor version");
779
780 Minor = getLexer().getTok().getIntVal();
781 Lex();
782
783 return false;
784}
785
786bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
787
788 uint32_t Major;
789 uint32_t Minor;
790
791 if (ParseDirectiveMajorMinor(Major, Minor))
792 return true;
793
794 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
795 return false;
796}
797
/// Handle the .hsa_code_object_isa directive.  With no arguments, the ISA
/// version is derived from the subtarget's feature bits; otherwise the
/// directive supplies "major, minor, stepping, "vendor", "arch"".
/// Returns true (with a TokError) on malformed input, false on success.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {

  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }


  // Major and minor are parsed by the shared helper; the remaining fields
  // follow the same "comma then value" pattern.
  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
854
Tom Stellardff7416b2015-06-26 21:58:31 +0000855bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
856 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +0000857 SmallString<40> ErrStr;
858 raw_svector_ostream Err(ErrStr);
859 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
860 return TokError(Err.str());
861 }
Tom Stellardff7416b2015-06-26 21:58:31 +0000862 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +0000863 return false;
864}
865
/// Handle the .amd_kernel_code_t directive: initialize a default header from
/// the subtarget features, then consume one "key = value" line at a time
/// until .end_amd_kernel_code_t, and emit the completed header.
/// Returns true (with a TokError) on malformed input, false on success.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {

  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {

    // Each value must start its own line.
    if (getLexer().isNot(AsmToken::EndOfStatement))
      return TokError("amd_kernel_code_t values must begin on a new line");

    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    // Terminator for the block.
    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
898
/// Switch the output streamer to the HSA text section.  Always succeeds
/// (returns false, the MC "no error" convention).
bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSATextSection(getContext()));
  return false;
}
904
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000905bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
906 if (getLexer().isNot(AsmToken::Identifier))
907 return TokError("expected symbol name");
908
909 StringRef KernelName = Parser.getTok().getString();
910
911 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
912 ELF::STT_AMDGPU_HSA_KERNEL);
913 Lex();
914 return false;
915}
916
Tom Stellard00f2f912015-12-02 19:47:57 +0000917bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
918 if (getLexer().isNot(AsmToken::Identifier))
919 return TokError("expected symbol name");
920
921 StringRef GlobalName = Parser.getTok().getIdentifier();
922
923 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
924 Lex();
925 return false;
926}
927
928bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
929 if (getLexer().isNot(AsmToken::Identifier))
930 return TokError("expected symbol name");
931
932 StringRef GlobalName = Parser.getTok().getIdentifier();
933
934 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
935 Lex();
936 return false;
937}
938
/// Switch the output streamer to the agent-scope HSA data-global section.
/// Always succeeds (returns false).
bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalAgentSection(getContext()));
  return false;
}
944
/// Switch the output streamer to the program-scope HSA data-global section.
/// Always succeeds (returns false).
bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSADataGlobalProgramSection(getContext()));
  return false;
}
950
/// Switch the output streamer to the agent-scope HSA read-only data section.
/// Always succeeds (returns false).
bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
  getParser().getStreamer().SwitchSection(
      AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
  return false;
}
956
/// Dispatch target-specific assembler directives to their handlers.
/// Returns true when the directive is NOT handled here (so generic parsing
/// continues), matching the MCTargetAsmParser convention; otherwise returns
/// whatever the specific handler returns.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  // Both spellings route .text into the HSA text section.
  if (IDVal == ".hsatext" || IDVal == ".text")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  // Unknown directive: let the generic parser try.
  return true;
}
992
Matt Arsenault68802d32015-11-05 03:11:27 +0000993bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
994 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +0000995 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +0000996 return true;
997
Matt Arsenault3b159672015-12-01 20:31:08 +0000998 if (isSI()) {
999 // No flat_scr
1000 switch (RegNo) {
1001 case AMDGPU::FLAT_SCR:
1002 case AMDGPU::FLAT_SCR_LO:
1003 case AMDGPU::FLAT_SCR_HI:
1004 return false;
1005 default:
1006 return true;
1007 }
1008 }
1009
Matt Arsenault68802d32015-11-05 03:11:27 +00001010 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1011 // SI/CI have.
1012 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1013 R.isValid(); ++R) {
1014 if (*R == RegNo)
1015 return false;
1016 }
1017
1018 return true;
1019}
1020
Tom Stellard45bb48e2015-06-13 03:28:10 +00001021static bool operandsHaveModifiers(const OperandVector &Operands) {
1022
1023 for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
1024 const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
1025 if (Op.isRegKind() && Op.hasModifiers())
1026 return true;
Tom Stellardd93a34f2016-02-22 19:17:56 +00001027 if (Op.isImm() && Op.hasModifiers())
1028 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001029 if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
1030 Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
1031 return true;
1032 }
1033 return false;
1034}
1035
/// Parse one instruction operand: first via the tablegen'd custom parsers,
/// then by hand for integer/float literals, registers (with optional
/// -, |...|, and abs(...) modifiers), VOP3 optional operands, and finally a
/// bare token.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  // Negate: leading '-'; Abs: |x| syntax; Abs2: abs(x) syntax.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  if (getLexer().getKind() == AsmToken::Pipe) {
    Parser.Lex();
    Abs = true;
  }

  switch(getLexer().getKind()) {
    case AsmToken::Integer: {
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;
      // Only 32-bit literals are representable; accept either signed or
      // unsigned 32-bit ranges.
      if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
        Error(S, "invalid immediate: only 32-bit values are legal");
        return MatchOperand_ParseFail;
      }

      if (Negate)
        IntVal *= -1;
      Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
      return MatchOperand_Success;
    }
    case AsmToken::Real: {
      // FIXME: We should emit an error if a double precisions floating-point
      // value is used.  I'm not sure the best way to detect this.
      SMLoc S = Parser.getTok().getLoc();
      int64_t IntVal;
      if (getParser().parseAbsoluteExpression(IntVal))
        return MatchOperand_ParseFail;

      // The bits come back as a double-pattern; narrow to float and store
      // the raw float bit pattern as the immediate.
      APFloat F((float)BitsToDouble(IntVal));
      if (Negate)
        F.changeSign();
      Operands.push_back(
          AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
      return MatchOperand_Success;
    }
    case AsmToken::Identifier: {
      if (auto R = parseRegister()) {
        unsigned Modifiers = 0;

        // Bit 0 = neg, bit 1 = abs (either |x| or abs(x) spelling).
        if (Negate)
          Modifiers |= 0x1;

        if (Abs) {
          // Require the closing '|'.
          if (getLexer().getKind() != AsmToken::Pipe)
            return MatchOperand_ParseFail;
          Parser.Lex();
          Modifiers |= 0x2;
        }
        if (Abs2) {
          // Require the closing ')'.
          if (getLexer().isNot(AsmToken::RParen)) {
            return MatchOperand_ParseFail;
          }
          Parser.Lex();
          Modifiers |= 0x2;
        }
        assert(R->isReg());
        R->Reg.IsForcedVOP3 = isForcedVOP3();
        if (Modifiers) {
          R->setModifiers(Modifiers);
        }
        Operands.push_back(std::move(R));
      } else {
        // Not a register: try VOP3 optional operands, else keep the raw
        // identifier as a token operand.
        ResTy = parseVOP3OptionalOps(Operands);
        if (ResTy == MatchOperand_NoMatch) {
          const auto &Tok = Parser.getTok();
          Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(),
                                                        Tok.getLoc()));
          Parser.Lex();
        }
      }
      return MatchOperand_Success;
    }
    default:
      return MatchOperand_NoMatch;
  }
}
1146
/// Parse a full instruction: record any _e32/_e64 forced-encoding suffix,
/// push the mnemonic as the first operand, then parse operands until end of
/// statement.  Returns true (with an Error) on any operand parse failure.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {

  // Clear any forced encodings from the previous instruction.
  setForcedEncodingSize(0);

  if (Name.endswith("_e64"))
    setForcedEncodingSize(64);
  else if (Name.endswith("_e32"))
    setForcedEncodingSize(32);

  // Add the instruction mnemonic
  Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
      case MatchOperand_Success: break;
      case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
                                                "failed parsing operand.");
      case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
                                              "not a valid operand.");
    }
  }

  return false;
}
1180
1181//===----------------------------------------------------------------------===//
1182// Utility functions
1183//===----------------------------------------------------------------------===//
1184
/// Parse "<Prefix>:<integer>" into \p Int.  At end of statement the value
/// defaults to \p Default and the parse succeeds; a non-matching prefix
/// yields NoMatch, and a matching prefix with malformed remainder yields
/// ParseFail.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
                                    int64_t Default) {
  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    Int = Default;
    return MatchOperand_Success;
  }

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Identifier: {
      StringRef OffsetName = Parser.getTok().getString();
      if (!OffsetName.equals(Prefix))
        return MatchOperand_NoMatch;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Colon))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;

      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;
      break;
    }
  }
  return MatchOperand_Success;
}
1217
1218AMDGPUAsmParser::OperandMatchResultTy
1219AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
1220 enum AMDGPUOperand::ImmTy ImmTy) {
1221
1222 SMLoc S = Parser.getTok().getLoc();
1223 int64_t Offset = 0;
1224
1225 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
1226 if (Res != MatchOperand_Success)
1227 return Res;
1228
1229 Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
1230 return MatchOperand_Success;
1231}
1232
/// Parse an optional named bit: the identifier \p Name sets the bit to 1,
/// "no<Name>" sets it to 0, end of statement defaults it to 0.  On success
/// the bit is appended to \p Operands as an immediate of kind \p ImmTy.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
      case AsmToken::Identifier: {
        StringRef Tok = Parser.getTok().getString();
        if (Tok == Name) {
          Bit = 1;
          Parser.Lex();
        } else if (Tok.startswith("no") && Tok.endswith(Name)) {
          // NOTE(review): this accepts any token of the form "no...<Name>",
          // not just the exact "no<Name>" spelling — presumably intentional.
          Bit = 0;
          Parser.Lex();
        } else {
          return MatchOperand_NoMatch;
        }
        break;
      }
      default:
        return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
  return MatchOperand_Success;
}
1264
// Maps an optional-immediate kind to the index (within an OperandVector) of
// the parsed operand that supplied it.
typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1266
Sam Koltona74cd522016-03-18 15:35:51 +00001267void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1268 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001269 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001270 auto i = OptionalIdx.find(ImmT);
1271 if (i != OptionalIdx.end()) {
1272 unsigned Idx = i->second;
1273 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1274 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001275 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001276 }
1277}
1278
Tom Stellard45bb48e2015-06-13 03:28:10 +00001279static bool operandsHasOptionalOp(const OperandVector &Operands,
1280 const OptionalOperand &OOp) {
1281 for (unsigned i = 0; i < Operands.size(); i++) {
1282 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1283 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1284 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1285 return true;
1286
1287 }
1288 return false;
1289}
1290
/// Try each optional operand in \p OptionalOps that has not already been
/// parsed.  Bit operands go through parseNamedBit; valued operands through
/// parseIntWithPrefix, are validated/converted via ConvertResult, and are
/// only pushed when they differ from their default.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
                                  OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  for (const OptionalOperand &Op : OptionalOps) {
    // Skip anything the caller already parsed for this instruction.
    if (operandsHasOptionalOp(Operands, Op))
      continue;
    AMDGPUAsmParser::OperandMatchResultTy Res;
    int64_t Value;
    if (Op.IsBit) {
      Res = parseNamedBit(Op.Name, Operands, Op.Type);
      if (Res == MatchOperand_NoMatch)
        continue;
      return Res;
    }

    Res = parseIntWithPrefix(Op.Name, Value, Op.Default);

    if (Res == MatchOperand_NoMatch)
      continue;

    if (Res != MatchOperand_Success)
      return Res;

    // Remember whether the raw value matched the default before
    // ConvertResult possibly rewrites it.
    bool DefaultValue = (Value == Op.Default);

    if (Op.ConvertResult && !Op.ConvertResult(Value)) {
      return MatchOperand_ParseFail;
    }

    // Default-valued operands are omitted; the converter fills them later.
    if (!DefaultValue) {
      Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
    }
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1328
1329//===----------------------------------------------------------------------===//
1330// ds
1331//===----------------------------------------------------------------------===//
1332
// Optional operands for DS instructions.  Fields per entry: name, immediate
// kind, is-bit flag, default value, result-conversion callback (see
// parseOptionalOps).
static const OptionalOperand DSOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};

// Variant for DS instructions that take split offset0/offset1 operands.
static const OptionalOperand DSOptionalOpsOff01 [] = {
  {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
  {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
};
1343
/// Parse the optional offset/gds operands of a DS instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOps, Operands);
}
/// Parse the optional offset0/offset1/gds operands of a two-offset DS
/// instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
  return parseOptionalOps(DSOptionalOpsOff01, Operands);
}
1352
1353AMDGPUAsmParser::OperandMatchResultTy
1354AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
1355 SMLoc S = Parser.getTok().getLoc();
1356 AMDGPUAsmParser::OperandMatchResultTy Res =
1357 parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
1358 if (Res == MatchOperand_NoMatch) {
1359 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
1360 AMDGPUOperand::ImmTyOffset));
1361 Res = MatchOperand_Success;
1362 }
1363 return Res;
1364}
1365
// DS offsets are unsigned 16-bit immediates.
bool AMDGPUOperand::isDSOffset() const {
  return isImm() && isUInt<16>(getImm());
}
1369
// Split DS offsets (offset0/offset1) are unsigned 8-bit immediates.
bool AMDGPUOperand::isDSOffset01() const {
  return isImm() && isUInt<8>(getImm());
}
1373
/// Convert parsed operands of a two-offset DS instruction into an MCInst:
/// registers in parse order, then offset0/offset1/gds (defaulting to 0 when
/// absent), then the implicit m0 use.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {

  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
1398
1399void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
1400
1401 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
1402 bool GDSOnly = false;
1403
1404 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1405 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1406
1407 // Add the register arguments
1408 if (Op.isReg()) {
1409 Op.addRegOperands(Inst, 1);
1410 continue;
1411 }
1412
1413 if (Op.isToken() && Op.getToken() == "gds") {
1414 GDSOnly = true;
1415 continue;
1416 }
1417
1418 // Handle optional arguments
1419 OptionalIdx[Op.getImmTy()] = i;
1420 }
1421
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001422 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
1423 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001424
1425 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001426 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001427 }
1428 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
1429}
1430
1431
1432//===----------------------------------------------------------------------===//
1433// s_waitcnt
1434//===----------------------------------------------------------------------===//
1435
/// Parse one "name(value)" component of an s_waitcnt operand and merge the
/// value into the packed \p IntVal: vmcnt in bits [3:0], expcnt in [6:4],
/// lgkmcnt in [11:8].  Returns true on any parse error or unknown counter.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Components may be joined with '&' or ','; consume the separator.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  // Clear this counter's field, then insert the new value.
  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}
1478
/// Parse the s_waitcnt operand: either a raw integer, or one or more
/// name(value) components folded into the packed encoding by parseCnt.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(CntVal))
        return MatchOperand_ParseFail;
      break;

    case AsmToken::Identifier:
      // Each parseCnt call consumes one name(value) component plus its
      // separator, until the end of the statement.
      do {
        if (parseCnt(CntVal))
          return MatchOperand_ParseFail;
      } while(getLexer().isNot(AsmToken::EndOfStatement));
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}
1506
// Any immediate is acceptable as a packed s_waitcnt value.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
1510
1511//===----------------------------------------------------------------------===//
1512// sopp branch targets
1513//===----------------------------------------------------------------------===//
1514
/// Parse a SOPP branch target: either an immediate offset or a label, which
/// becomes a symbol-reference expression operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      // Branch to a label: create (or reuse) the symbol and wrap it in a
      // symbol-ref expression for later fixup.
      Operands.push_back(AMDGPUOperand::CreateExpr(
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
              Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}
1537
1538//===----------------------------------------------------------------------===//
1539// flat
1540//===----------------------------------------------------------------------===//
1541
// Optional cache-control bits accepted by FLAT loads/stores.  Fields per
// entry: name, immediate kind, is-bit flag, default, converter.
static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

// FLAT atomics: glc is not optional here (handled via the asm string), so
// only slc/tfe appear.
static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1552
/// Parse the optional glc/slc/tfe bits of a FLAT instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}
1557
/// Parse the optional slc/tfe bits of a FLAT atomic instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}
1562
1563void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
1564 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001565 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001566
1567 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
1568 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
1569
1570 // Add the register arguments
1571 if (Op.isReg()) {
1572 Op.addRegOperands(Inst, 1);
1573 continue;
1574 }
1575
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001576 OptionalIdx[Op.getImmTy()] = i;
1577 }
1578 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1579 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1580 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1581}
1582
1583
/// Convert parsed FLAT-atomic operands into an MCInst: registers in parse
/// order, hard-coded tokens (e.g. 'glc') skipped, then slc/tfe appended.
void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
1608
1609//===----------------------------------------------------------------------===//
1610// mubuf
1611//===----------------------------------------------------------------------===//
1612
// Optional operands for MUBUF instructions: byte offset plus glc/slc/tfe
// cache-control bits.  Fields per entry: name, immediate kind, is-bit flag,
// default, converter.
static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};
1619
/// Parse the optional offset/glc/slc/tfe operands of a MUBUF instruction.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}
1624
/// Parse a standalone "offset:<n>" operand.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}
1629
/// Parse an optional 'glc'/'noglc' bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}
1634
/// Parse an optional 'slc'/'noslc' bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}
1639
/// Parse an optional 'tfe'/'notfe' bit.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}
1644
// MUBUF offsets are unsigned 12-bit immediates of kind ImmTyOffset.
bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}
1648
/// Convert parsed MUBUF operands into an MCInst: registers and an immediate
/// soffset in parse order, hard-coded tokens skipped, then the optional
/// offset/glc/slc/tfe immediates appended (defaulting to 0 when absent).
void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string.  There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
1684
1685//===----------------------------------------------------------------------===//
1686// mimg
1687//===----------------------------------------------------------------------===//
1688
1689AMDGPUAsmParser::OperandMatchResultTy
1690AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001691 return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001692}
1693
// Parse the MIMG "unorm" flag (a named single-bit operand).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}
1698
// Parse the MIMG "da" flag (a named single-bit operand).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}
1703
// Parse the MIMG "r128" flag (a named single-bit operand).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}
1708
// Parse the MIMG "lwe" flag (a named single-bit operand).
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}
1713
1714//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00001715// smrd
1716//===----------------------------------------------------------------------===//
1717
// An immediate usable as an SMRD offset: must fit the 8-bit field.
bool AMDGPUOperand::isSMRDOffset() const {

  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}
1724
1725bool AMDGPUOperand::isSMRDLiteralOffset() const {
1726 // 32-bit literals are only supported on CI and we only want to use them
1727 // when the offset is > 8-bits.
1728 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
1729}
1730
1731//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00001732// vop3
1733//===----------------------------------------------------------------------===//
1734
// Convert an omod "mul:<n>" value (1, 2 or 4) in place to its encoded form
// (0, 1 or 2 via a right shift). Returns false, leaving the value untouched,
// for any other multiplier.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
1742
// Convert an omod "div:<n>" value in place to its encoded form:
// div:1 -> 0, div:2 -> 3. Any other divisor is rejected (value untouched).
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
1756
// Optional operands accepted after VOP3 instructions. Both "mul" and "div"
// feed the same omod immediate (ImmTyOMod); their converters
// (ConvertOmodMul/ConvertOmodDiv) translate the written value into the
// encoded field value.
static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};
1762
1763static bool isVOP3(OperandVector &Operands) {
1764 if (operandsHaveModifiers(Operands))
1765 return true;
1766
Tom Stellarda90b9522016-02-11 03:28:15 +00001767 if (Operands.size() >= 2) {
1768 AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001769
Valery Pykhtinf91911c2016-03-14 05:01:45 +00001770 if (DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
Tom Stellarda90b9522016-02-11 03:28:15 +00001771 return true;
1772 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001773
1774 if (Operands.size() >= 5)
1775 return true;
1776
1777 if (Operands.size() > 3) {
1778 AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
Valery Pykhtinf91911c2016-03-14 05:01:45 +00001779 if (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
1780 Src1Op.isRegClass(AMDGPU::SReg_64RegClassID))
Tom Stellard45bb48e2015-06-13 03:28:10 +00001781 return true;
1782 }
1783 return false;
1784}
1785
// Parse the optional VOP3 trailing operands (clamp, mul/div omod), but only
// when this instruction can actually be VOP3-encoded (or the user forced the
// 64-bit encoding). Otherwise report no match so the VOP1/VOP2 form is kept.
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {

  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        // Give modifier-less reg/imm operands an explicit all-clear
        // modifier word so the operand list is uniform for matching.
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}
1814
Tom Stellarda90b9522016-02-11 03:28:15 +00001815void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
1816 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00001817 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00001818 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00001819 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
1820 }
1821 for (unsigned E = Operands.size(); I != E; ++I)
1822 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
1823}
1824
1825void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001826 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
1827 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00001828 cvtVOP3(Inst, Operands);
1829 } else {
1830 cvtId(Inst, Operands);
1831 }
1832}
1833
1834void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
1835 if (operandsHaveModifiers(Operands)) {
1836 cvtVOP3(Inst, Operands);
1837 } else {
1838 cvtId(Inst, Operands);
1839 }
1840}
1841
// Converter for instructions that only exist in the VOP3 encoding;
// unconditionally uses the VOP3 conversion.
void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}
1845
1846void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001847 OptionalImmIndexMap OptionalIdx;
Tom Stellarda90b9522016-02-11 03:28:15 +00001848 unsigned I = 1;
1849 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00001850 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00001851 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00001852 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001853
Tom Stellarda90b9522016-02-11 03:28:15 +00001854 for (unsigned E = Operands.size(); I != E; ++I) {
1855 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Tom Stellardd93a34f2016-02-22 19:17:56 +00001856 if (Op.isRegOrImmWithInputMods()) {
1857 Op.addRegOrImmWithInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001858 } else if (Op.isImm()) {
1859 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00001860 } else {
1861 assert(false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001862 }
Tom Stellarda90b9522016-02-11 03:28:15 +00001863 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001864
Nikolay Haustovea8febd2016-03-01 08:34:43 +00001865 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
1866 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001867}
1868
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001869void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001870 unsigned I = 1;
1871 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
1872 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
1873 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
1874 }
1875
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001876 OptionalImmIndexMap OptionalIdx;
1877
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001878 for (unsigned E = Operands.size(); I != E; ++I) {
1879 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001880
1881 // Add the register arguments
1882 if (Op.isRegOrImm()) {
1883 Op.addRegOrImmOperands(Inst, 1);
1884 continue;
1885 } else if (Op.isImmModifier()) {
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001886 OptionalIdx[Op.getImmTy()] = I;
Nikolay Haustov2f684f12016-02-26 09:51:05 +00001887 } else {
1888 assert(false);
1889 }
1890 }
1891
1892 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
1893 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
1894 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1895 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
1896 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
1897 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1898 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
1899 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1900}
1901
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00001902void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
1903 unsigned I = 1;
1904 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
1905 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
1906 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
1907 }
1908
1909 // Add src, same as dst
1910 ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);
1911
1912 OptionalImmIndexMap OptionalIdx;
1913
1914 for (unsigned E = Operands.size(); I != E; ++I) {
1915 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
1916
1917 // Add the register arguments
1918 if (Op.isRegOrImm()) {
1919 Op.addRegOrImmOperands(Inst, 1);
1920 continue;
1921 } else if (Op.isImmModifier()) {
1922 OptionalIdx[Op.getImmTy()] = I;
1923 } else {
1924 assert(false);
1925 }
1926 }
1927
1928 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
1929 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
1930 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
1931 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
1932 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
1933 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
1934 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
1935 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
1936}
1937
Sam Koltondfa29f72016-03-09 12:29:31 +00001938//===----------------------------------------------------------------------===//
1939// dpp
1940//===----------------------------------------------------------------------===//
1941
1942bool AMDGPUOperand::isDPPCtrl() const {
1943 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
1944 if (result) {
1945 int64_t Imm = getImm();
1946 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
1947 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
1948 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
1949 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
1950 (Imm == 0x130) ||
1951 (Imm == 0x134) ||
1952 (Imm == 0x138) ||
1953 (Imm == 0x13c) ||
1954 (Imm == 0x140) ||
1955 (Imm == 0x141) ||
1956 (Imm == 0x142) ||
1957 (Imm == 0x143);
1958 }
1959 return false;
1960}
1961
// Parse a DPP dpp_ctrl operand. Accepted forms and their encodings:
//   quad_perm:[a,b,c,d]               -> 0x000-0x0ff (2 bits per lane)
//   row_shl:n / row_shr:n / row_ror:n -> 0x100 / 0x110 / 0x120 | n
//   wave_shl:n / wave_rol:n / wave_shr:n / wave_ror:n
//                                     -> fixed 0x130 / 0x134 / 0x138 / 0x13c
//                                        (the parsed value is discarded)
//   row_mirror / row_half_mirror      -> 0x140 / 0x141
//   row_bcast:15 / row_bcast:31       -> 0x142 / 0x143
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands) {
  // ToDo: use same syntax as sp3 for dpp_ctrl
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  // The control must start with an identifier keyword.
  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
  } else {
    // All remaining forms are "<keyword>:<something>".
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;

      // First lane select occupies bits [1:0].
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      // Second lane select, bits [3:2].
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 2);

      // Third lane select, bits [5:4].
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 4);

      // Fourth lane select, bits [7:6].
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 6);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;

    } else {
      // sel:%d
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      if (Prefix == "row_shl") {
        Int |= 0x100;
      } else if (Prefix == "row_shr") {
        Int |= 0x110;
      } else if (Prefix == "row_ror") {
        Int |= 0x120;
      } else if (Prefix == "wave_shl") {
        Int = 0x130;
      } else if (Prefix == "wave_rol") {
        Int = 0x134;
      } else if (Prefix == "wave_shr") {
        Int = 0x138;
      } else if (Prefix == "wave_ror") {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        // Only broadcasts of 15 or 31 lanes have encodings; note that any
        // other row_bcast value falls through with the raw parsed integer.
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        }
      } else {
        return MatchOperand_NoMatch;
      }
    }
  }
  Parser.Lex(); // eat last token

  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
2061
// Optional operands accepted after DPP instructions. The row_mask/bank_mask
// defaults (0xf) match the defaults supplied by cvtDPP. bound_ctrl's parsed
// value is later replaced with 1 in parseDPPOptionalOps ("bound_ctrl:0"
// means the bit is set).
static const OptionalOperand DPPOptionalOps [] = {
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, nullptr}
};
2067
Sam Koltona74cd522016-03-18 15:35:51 +00002068AMDGPUAsmParser::OperandMatchResultTy
Sam Koltondfa29f72016-03-09 12:29:31 +00002069AMDGPUAsmParser::parseDPPOptionalOps(OperandVector &Operands) {
2070 SMLoc S = Parser.getTok().getLoc();
2071 OperandMatchResultTy Res = parseOptionalOps(DPPOptionalOps, Operands);
2072 // XXX - sp3 use syntax "bound_ctrl:0" to indicate that bound_ctrl bit was set
2073 if (Res == MatchOperand_Success) {
2074 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands.back());
2075 // If last operand was parsed as bound_ctrl we should replace it with correct value (1)
2076 if (Op.isImmTy(AMDGPUOperand::ImmTyDppBoundCtrl)) {
2077 Operands.pop_back();
2078 Operands.push_back(
2079 AMDGPUOperand::CreateImm(1, S, AMDGPUOperand::ImmTyDppBoundCtrl));
2080 return MatchOperand_Success;
2081 }
2082 }
2083 return Res;
2084}
2085
// DPP conversion for the form whose sources carry input-modifier words.
void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, true);
}
2089
// DPP conversion for the form whose sources are plain registers (no input
// modifiers).
void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, false);
}
2093
Sam Koltona74cd522016-03-18 15:35:51 +00002094void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
Sam Koltondfa29f72016-03-09 12:29:31 +00002095 bool HasMods) {
2096 OptionalImmIndexMap OptionalIdx;
2097
2098 unsigned I = 1;
2099 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2100 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
2101 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2102 }
2103
2104 for (unsigned E = Operands.size(); I != E; ++I) {
2105 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
2106 // Add the register arguments
2107 if (!HasMods && Op.isReg()) {
2108 Op.addRegOperands(Inst, 1);
2109 } else if (HasMods && Op.isRegOrImmWithInputMods()) {
2110 Op.addRegOrImmWithInputModsOperands(Inst, 2);
2111 } else if (Op.isDPPCtrl()) {
2112 Op.addImmOperands(Inst, 1);
2113 } else if (Op.isImm()) {
2114 // Handle optional arguments
2115 OptionalIdx[Op.getImmTy()] = I;
2116 } else {
2117 llvm_unreachable("Invalid operand type");
2118 }
2119 }
2120
2121 // ToDo: fix default values for row_mask and bank_mask
2122 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
2123 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
2124 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
2125}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00002126
Nikolay Haustov2f684f12016-02-26 09:51:05 +00002127
/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  // Register this asm parser with both AMDGPU target entries.
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}
2133
2134#define GET_REGISTER_MATCHER
2135#define GET_MATCHER_IMPLEMENTATION
2136#include "AMDGPUGenAsmMatcher.inc"