blob: c099c9b3a36b87fb57626a694d7068bee83a307f [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000016#include "llvm/ADT/APFloat.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000017#include "llvm/ADT/STLExtras.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000018#include "llvm/ADT/SmallString.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000019#include "llvm/ADT/StringSwitch.h"
20#include "llvm/ADT/Twine.h"
21#include "llvm/MC/MCContext.h"
22#include "llvm/MC/MCExpr.h"
23#include "llvm/MC/MCInst.h"
24#include "llvm/MC/MCInstrInfo.h"
25#include "llvm/MC/MCParser/MCAsmLexer.h"
26#include "llvm/MC/MCParser/MCAsmParser.h"
27#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000028#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000029#include "llvm/MC/MCRegisterInfo.h"
30#include "llvm/MC/MCStreamer.h"
31#include "llvm/MC/MCSubtargetInfo.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000032#include "llvm/MC/MCSymbolELF.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000033#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000034#include "llvm/Support/ELF.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000035#include "llvm/Support/SourceMgr.h"
36#include "llvm/Support/TargetRegistry.h"
37#include "llvm/Support/raw_ostream.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038
39using namespace llvm;
40
41namespace {
42
43struct OptionalOperand;
44
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000045enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
46
Tom Stellard45bb48e2015-06-13 03:28:10 +000047class AMDGPUOperand : public MCParsedAsmOperand {
48 enum KindTy {
49 Token,
50 Immediate,
51 Register,
52 Expression
53 } Kind;
54
55 SMLoc StartLoc, EndLoc;
56
57public:
58 AMDGPUOperand(enum KindTy K) : MCParsedAsmOperand(), Kind(K) {}
59
60 MCContext *Ctx;
61
62 enum ImmTy {
63 ImmTyNone,
64 ImmTyDSOffset0,
65 ImmTyDSOffset1,
66 ImmTyGDS,
67 ImmTyOffset,
68 ImmTyGLC,
69 ImmTySLC,
70 ImmTyTFE,
71 ImmTyClamp,
Nikolay Haustov2f684f12016-02-26 09:51:05 +000072 ImmTyOMod,
Sam Koltondfa29f72016-03-09 12:29:31 +000073 ImmTyDppCtrl,
74 ImmTyDppRowMask,
75 ImmTyDppBankMask,
76 ImmTyDppBoundCtrl,
Nikolay Haustov2f684f12016-02-26 09:51:05 +000077 ImmTyDMask,
78 ImmTyUNorm,
79 ImmTyDA,
80 ImmTyR128,
81 ImmTyLWE,
Artem Tamazovd6468662016-04-25 14:13:51 +000082 ImmTyHwreg,
Tom Stellard45bb48e2015-06-13 03:28:10 +000083 };
84
85 struct TokOp {
86 const char *Data;
87 unsigned Length;
88 };
89
90 struct ImmOp {
91 bool IsFPImm;
92 ImmTy Type;
93 int64_t Val;
Tom Stellardd93a34f2016-02-22 19:17:56 +000094 int Modifiers;
Tom Stellard45bb48e2015-06-13 03:28:10 +000095 };
96
97 struct RegOp {
98 unsigned RegNo;
99 int Modifiers;
100 const MCRegisterInfo *TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000101 const MCSubtargetInfo *STI;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000102 bool IsForcedVOP3;
103 };
104
105 union {
106 TokOp Tok;
107 ImmOp Imm;
108 RegOp Reg;
109 const MCExpr *Expr;
110 };
111
112 void addImmOperands(MCInst &Inst, unsigned N) const {
113 Inst.addOperand(MCOperand::createImm(getImm()));
114 }
115
116 StringRef getToken() const {
117 return StringRef(Tok.Data, Tok.Length);
118 }
119
120 void addRegOperands(MCInst &Inst, unsigned N) const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000121 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), *Reg.STI)));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000122 }
123
124 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000125 if (isRegKind())
Tom Stellard45bb48e2015-06-13 03:28:10 +0000126 addRegOperands(Inst, N);
127 else
128 addImmOperands(Inst, N);
129 }
130
Tom Stellardd93a34f2016-02-22 19:17:56 +0000131 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
132 if (isRegKind()) {
133 Inst.addOperand(MCOperand::createImm(Reg.Modifiers));
134 addRegOperands(Inst, N);
135 } else {
136 Inst.addOperand(MCOperand::createImm(Imm.Modifiers));
137 addImmOperands(Inst, N);
138 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000139 }
140
141 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
142 if (isImm())
143 addImmOperands(Inst, N);
144 else {
145 assert(isExpr());
146 Inst.addOperand(MCOperand::createExpr(Expr));
147 }
148 }
149
150 bool defaultTokenHasSuffix() const {
151 StringRef Token(Tok.Data, Tok.Length);
152
Sam Koltona74cd522016-03-18 15:35:51 +0000153 return Token.endswith("_e32") || Token.endswith("_e64") ||
Sam Koltondfa29f72016-03-09 12:29:31 +0000154 Token.endswith("_dpp");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000155 }
156
157 bool isToken() const override {
158 return Kind == Token;
159 }
160
161 bool isImm() const override {
162 return Kind == Immediate;
163 }
164
Tom Stellardd93a34f2016-02-22 19:17:56 +0000165 bool isInlinableImm() const {
166 if (!isImm() || Imm.Type != AMDGPUOperand::ImmTyNone /* Only plain
167 immediates are inlinable (e.g. "clamp" attribute is not) */ )
168 return false;
169 // TODO: We should avoid using host float here. It would be better to
Sam Koltona74cd522016-03-18 15:35:51 +0000170 // check the float bit values which is what a few other places do.
Tom Stellardd93a34f2016-02-22 19:17:56 +0000171 // We've had bot failures before due to weird NaN support on mips hosts.
172 const float F = BitsToFloat(Imm.Val);
173 // TODO: Add 1/(2*pi) for VI
174 return (Imm.Val <= 64 && Imm.Val >= -16) ||
Tom Stellard45bb48e2015-06-13 03:28:10 +0000175 (F == 0.0 || F == 0.5 || F == -0.5 || F == 1.0 || F == -1.0 ||
Tom Stellardd93a34f2016-02-22 19:17:56 +0000176 F == 2.0 || F == -2.0 || F == 4.0 || F == -4.0);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000177 }
178
179 bool isDSOffset0() const {
180 assert(isImm());
181 return Imm.Type == ImmTyDSOffset0;
182 }
183
184 bool isDSOffset1() const {
185 assert(isImm());
186 return Imm.Type == ImmTyDSOffset1;
187 }
188
189 int64_t getImm() const {
190 return Imm.Val;
191 }
192
193 enum ImmTy getImmTy() const {
194 assert(isImm());
195 return Imm.Type;
196 }
197
198 bool isRegKind() const {
199 return Kind == Register;
200 }
201
202 bool isReg() const override {
Tom Stellarda90b9522016-02-11 03:28:15 +0000203 return Kind == Register && Reg.Modifiers == 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000204 }
205
Tom Stellardd93a34f2016-02-22 19:17:56 +0000206 bool isRegOrImmWithInputMods() const {
207 return Kind == Register || isInlinableImm();
Tom Stellarda90b9522016-02-11 03:28:15 +0000208 }
209
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000210 bool isImmTy(ImmTy ImmT) const {
211 return isImm() && Imm.Type == ImmT;
212 }
213
Tom Stellarda90b9522016-02-11 03:28:15 +0000214 bool isClamp() const {
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000215 return isImmTy(ImmTyClamp);
Tom Stellarda90b9522016-02-11 03:28:15 +0000216 }
217
218 bool isOMod() const {
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000219 return isImmTy(ImmTyOMod);
Tom Stellarda90b9522016-02-11 03:28:15 +0000220 }
221
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000222 bool isImmModifier() const {
223 return Kind == Immediate && Imm.Type != ImmTyNone;
224 }
225
226 bool isDMask() const {
227 return isImmTy(ImmTyDMask);
228 }
229
230 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
231 bool isDA() const { return isImmTy(ImmTyDA); }
232 bool isR128() const { return isImmTy(ImmTyUNorm); }
233 bool isLWE() const { return isImmTy(ImmTyLWE); }
234
Tom Stellarda90b9522016-02-11 03:28:15 +0000235 bool isMod() const {
236 return isClamp() || isOMod();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000237 }
238
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000239 bool isGDS() const { return isImmTy(ImmTyGDS); }
240 bool isGLC() const { return isImmTy(ImmTyGLC); }
241 bool isSLC() const { return isImmTy(ImmTySLC); }
242 bool isTFE() const { return isImmTy(ImmTyTFE); }
243
Sam Koltondfa29f72016-03-09 12:29:31 +0000244 bool isBankMask() const {
245 return isImmTy(ImmTyDppBankMask);
246 }
247
248 bool isRowMask() const {
249 return isImmTy(ImmTyDppRowMask);
250 }
251
252 bool isBoundCtrl() const {
253 return isImmTy(ImmTyDppBoundCtrl);
254 }
Sam Koltona74cd522016-03-18 15:35:51 +0000255
Tom Stellard45bb48e2015-06-13 03:28:10 +0000256 void setModifiers(unsigned Mods) {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000257 assert(isReg() || (isImm() && Imm.Modifiers == 0));
258 if (isReg())
259 Reg.Modifiers = Mods;
260 else
261 Imm.Modifiers = Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000262 }
263
264 bool hasModifiers() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000265 assert(isRegKind() || isImm());
266 return isRegKind() ? Reg.Modifiers != 0 : Imm.Modifiers != 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000267 }
268
269 unsigned getReg() const override {
270 return Reg.RegNo;
271 }
272
273 bool isRegOrImm() const {
274 return isReg() || isImm();
275 }
276
277 bool isRegClass(unsigned RCID) const {
Tom Stellarda90b9522016-02-11 03:28:15 +0000278 return isReg() && Reg.TRI->getRegClass(RCID).contains(getReg());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000279 }
280
281 bool isSCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000282 return isInlinableImm() || isRegClass(AMDGPU::SReg_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000283 }
284
Matt Arsenault86d336e2015-09-08 21:15:00 +0000285 bool isSCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000286 return isInlinableImm() || isRegClass(AMDGPU::SReg_64RegClassID);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000287 }
288
289 bool isSSrc32() const {
290 return isImm() || isSCSrc32();
291 }
292
293 bool isSSrc64() const {
294 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
295 // See isVSrc64().
296 return isImm() || isSCSrc64();
Matt Arsenault86d336e2015-09-08 21:15:00 +0000297 }
298
Tom Stellard45bb48e2015-06-13 03:28:10 +0000299 bool isVCSrc32() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000300 return isInlinableImm() || isRegClass(AMDGPU::VS_32RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000301 }
302
303 bool isVCSrc64() const {
Valery Pykhtinf91911c2016-03-14 05:01:45 +0000304 return isInlinableImm() || isRegClass(AMDGPU::VS_64RegClassID);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000305 }
306
307 bool isVSrc32() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000308 return isImm() || isVCSrc32();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000309 }
310
311 bool isVSrc64() const {
Sam Koltona74cd522016-03-18 15:35:51 +0000312 // TODO: Check if the 64-bit value (coming from assembly source) can be
Tom Stellardd93a34f2016-02-22 19:17:56 +0000313 // narrowed to 32 bits (in the instruction stream). That require knowledge
314 // of instruction type (unsigned/signed, floating or "untyped"/B64),
315 // see [AMD GCN3 ISA 6.3.1].
316 // TODO: How 64-bit values are formed from 32-bit literals in _B64 insns?
317 return isImm() || isVCSrc64();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000318 }
319
320 bool isMem() const override {
321 return false;
322 }
323
324 bool isExpr() const {
325 return Kind == Expression;
326 }
327
328 bool isSoppBrTarget() const {
329 return isExpr() || isImm();
330 }
331
332 SMLoc getStartLoc() const override {
333 return StartLoc;
334 }
335
336 SMLoc getEndLoc() const override {
337 return EndLoc;
338 }
339
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000340 void print(raw_ostream &OS) const override {
341 switch (Kind) {
342 case Register:
Matt Arsenault2ea0a232015-10-24 00:12:56 +0000343 OS << "<register " << getReg() << " mods: " << Reg.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000344 break;
345 case Immediate:
Tom Stellardd93a34f2016-02-22 19:17:56 +0000346 if (Imm.Type != AMDGPUOperand::ImmTyNone)
347 OS << getImm();
Sam Koltona74cd522016-03-18 15:35:51 +0000348 else
Tom Stellardd93a34f2016-02-22 19:17:56 +0000349 OS << '<' << getImm() << " mods: " << Imm.Modifiers << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000350 break;
351 case Token:
352 OS << '\'' << getToken() << '\'';
353 break;
354 case Expression:
355 OS << "<expr " << *Expr << '>';
356 break;
357 }
358 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000359
360 static std::unique_ptr<AMDGPUOperand> CreateImm(int64_t Val, SMLoc Loc,
361 enum ImmTy Type = ImmTyNone,
362 bool IsFPImm = false) {
363 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate);
364 Op->Imm.Val = Val;
365 Op->Imm.IsFPImm = IsFPImm;
366 Op->Imm.Type = Type;
Tom Stellardd93a34f2016-02-22 19:17:56 +0000367 Op->Imm.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000368 Op->StartLoc = Loc;
369 Op->EndLoc = Loc;
370 return Op;
371 }
372
373 static std::unique_ptr<AMDGPUOperand> CreateToken(StringRef Str, SMLoc Loc,
374 bool HasExplicitEncodingSize = true) {
375 auto Res = llvm::make_unique<AMDGPUOperand>(Token);
376 Res->Tok.Data = Str.data();
377 Res->Tok.Length = Str.size();
378 Res->StartLoc = Loc;
379 Res->EndLoc = Loc;
380 return Res;
381 }
382
383 static std::unique_ptr<AMDGPUOperand> CreateReg(unsigned RegNo, SMLoc S,
384 SMLoc E,
385 const MCRegisterInfo *TRI,
Tom Stellard2b65ed32015-12-21 18:44:27 +0000386 const MCSubtargetInfo *STI,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000387 bool ForceVOP3) {
388 auto Op = llvm::make_unique<AMDGPUOperand>(Register);
389 Op->Reg.RegNo = RegNo;
390 Op->Reg.TRI = TRI;
Tom Stellard2b65ed32015-12-21 18:44:27 +0000391 Op->Reg.STI = STI;
Tom Stellarda90b9522016-02-11 03:28:15 +0000392 Op->Reg.Modifiers = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000393 Op->Reg.IsForcedVOP3 = ForceVOP3;
394 Op->StartLoc = S;
395 Op->EndLoc = E;
396 return Op;
397 }
398
399 static std::unique_ptr<AMDGPUOperand> CreateExpr(const class MCExpr *Expr, SMLoc S) {
400 auto Op = llvm::make_unique<AMDGPUOperand>(Expression);
401 Op->Expr = Expr;
402 Op->StartLoc = S;
403 Op->EndLoc = S;
404 return Op;
405 }
406
407 bool isDSOffset() const;
408 bool isDSOffset01() const;
409 bool isSWaitCnt() const;
Artem Tamazovd6468662016-04-25 14:13:51 +0000410 bool isHwreg() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000411 bool isMubufOffset() const;
Tom Stellard217361c2015-08-06 19:28:38 +0000412 bool isSMRDOffset() const;
413 bool isSMRDLiteralOffset() const;
Sam Koltondfa29f72016-03-09 12:29:31 +0000414 bool isDPPCtrl() const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000415};
416
417class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000418 const MCInstrInfo &MII;
419 MCAsmParser &Parser;
420
421 unsigned ForcedEncodingSize;
Matt Arsenault68802d32015-11-05 03:11:27 +0000422
Matt Arsenault3b159672015-12-01 20:31:08 +0000423 bool isSI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000424 return AMDGPU::isSI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000425 }
426
427 bool isCI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000428 return AMDGPU::isCI(getSTI());
Matt Arsenault3b159672015-12-01 20:31:08 +0000429 }
430
Matt Arsenault68802d32015-11-05 03:11:27 +0000431 bool isVI() const {
Tom Stellard2b65ed32015-12-21 18:44:27 +0000432 return AMDGPU::isVI(getSTI());
Matt Arsenault68802d32015-11-05 03:11:27 +0000433 }
434
435 bool hasSGPR102_SGPR103() const {
436 return !isVI();
437 }
438
Tom Stellard45bb48e2015-06-13 03:28:10 +0000439 /// @name Auto-generated Match Functions
440 /// {
441
442#define GET_ASSEMBLER_HEADER
443#include "AMDGPUGenAsmMatcher.inc"
444
445 /// }
446
Tom Stellard347ac792015-06-26 21:15:07 +0000447private:
448 bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
449 bool ParseDirectiveHSACodeObjectVersion();
450 bool ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +0000451 bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
452 bool ParseDirectiveAMDKernelCodeT();
Tom Stellarde135ffd2015-09-25 21:41:28 +0000453 bool ParseSectionDirectiveHSAText();
Matt Arsenault68802d32015-11-05 03:11:27 +0000454 bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000455 bool ParseDirectiveAMDGPUHsaKernel();
Tom Stellard00f2f912015-12-02 19:47:57 +0000456 bool ParseDirectiveAMDGPUHsaModuleGlobal();
457 bool ParseDirectiveAMDGPUHsaProgramGlobal();
458 bool ParseSectionDirectiveHSADataGlobalAgent();
459 bool ParseSectionDirectiveHSADataGlobalProgram();
Tom Stellard9760f032015-12-03 03:34:32 +0000460 bool ParseSectionDirectiveHSARodataReadonlyAgent();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000461 bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
462 bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
Tom Stellard347ac792015-06-26 21:15:07 +0000463
Tom Stellard45bb48e2015-06-13 03:28:10 +0000464public:
Tom Stellard88e0b252015-10-06 15:57:53 +0000465 enum AMDGPUMatchResultTy {
466 Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
467 };
468
Akira Hatanakab11ef082015-11-14 06:35:56 +0000469 AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000470 const MCInstrInfo &MII,
471 const MCTargetOptions &Options)
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000472 : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
Matt Arsenault68802d32015-11-05 03:11:27 +0000473 ForcedEncodingSize(0) {
Akira Hatanakab11ef082015-11-14 06:35:56 +0000474 MCAsmParserExtension::Initialize(Parser);
475
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000476 if (getSTI().getFeatureBits().none()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000477 // Set default features.
Akira Hatanakab11ef082015-11-14 06:35:56 +0000478 copySTI().ToggleFeature("SOUTHERN_ISLANDS");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000479 }
480
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000481 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
Tom Stellard45bb48e2015-06-13 03:28:10 +0000482 }
483
Tom Stellard347ac792015-06-26 21:15:07 +0000484 AMDGPUTargetStreamer &getTargetStreamer() {
485 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
486 return static_cast<AMDGPUTargetStreamer &>(TS);
487 }
488
Tom Stellard45bb48e2015-06-13 03:28:10 +0000489 unsigned getForcedEncodingSize() const {
490 return ForcedEncodingSize;
491 }
492
493 void setForcedEncodingSize(unsigned Size) {
494 ForcedEncodingSize = Size;
495 }
496
497 bool isForcedVOP3() const {
498 return ForcedEncodingSize == 64;
499 }
500
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000501 std::unique_ptr<AMDGPUOperand> parseRegister();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000502 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
503 unsigned checkTargetMatchPredicate(MCInst &Inst) override;
504 bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
505 OperandVector &Operands, MCStreamer &Out,
506 uint64_t &ErrorInfo,
507 bool MatchingInlineAsm) override;
508 bool ParseDirective(AsmToken DirectiveID) override;
509 OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
510 bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
511 SMLoc NameLoc, OperandVector &Operands) override;
512
513 OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int,
514 int64_t Default = 0);
515 OperandMatchResultTy parseIntWithPrefix(const char *Prefix,
516 OperandVector &Operands,
517 enum AMDGPUOperand::ImmTy ImmTy =
518 AMDGPUOperand::ImmTyNone);
519 OperandMatchResultTy parseNamedBit(const char *Name, OperandVector &Operands,
520 enum AMDGPUOperand::ImmTy ImmTy =
521 AMDGPUOperand::ImmTyNone);
522 OperandMatchResultTy parseOptionalOps(
523 const ArrayRef<OptionalOperand> &OptionalOps,
524 OperandVector &Operands);
525
526
527 void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
528 void cvtDS(MCInst &Inst, const OperandVector &Operands);
529 OperandMatchResultTy parseDSOptionalOps(OperandVector &Operands);
530 OperandMatchResultTy parseDSOff01OptionalOps(OperandVector &Operands);
531 OperandMatchResultTy parseDSOffsetOptional(OperandVector &Operands);
532
533 bool parseCnt(int64_t &IntVal);
534 OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
Artem Tamazovd6468662016-04-25 14:13:51 +0000535 bool parseHwreg(int64_t &HwRegCode, int64_t &Offset, int64_t &Width);
536 OperandMatchResultTy parseHwregOp(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000537 OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);
538
539 OperandMatchResultTy parseFlatOptionalOps(OperandVector &Operands);
540 OperandMatchResultTy parseFlatAtomicOptionalOps(OperandVector &Operands);
541 void cvtFlat(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +0000542 void cvtFlatAtomic(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000543
544 void cvtMubuf(MCInst &Inst, const OperandVector &Operands);
545 OperandMatchResultTy parseOffset(OperandVector &Operands);
546 OperandMatchResultTy parseMubufOptionalOps(OperandVector &Operands);
547 OperandMatchResultTy parseGLC(OperandVector &Operands);
548 OperandMatchResultTy parseSLC(OperandVector &Operands);
549 OperandMatchResultTy parseTFE(OperandVector &Operands);
550
551 OperandMatchResultTy parseDMask(OperandVector &Operands);
552 OperandMatchResultTy parseUNorm(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000553 OperandMatchResultTy parseDA(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000554 OperandMatchResultTy parseR128(OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000555 OperandMatchResultTy parseLWE(OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000556
Tom Stellarda90b9522016-02-11 03:28:15 +0000557 void cvtId(MCInst &Inst, const OperandVector &Operands);
558 void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
559 void cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands);
560 void cvtVOP3_only(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000561 void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000562
563 void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +0000564 void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000565 OperandMatchResultTy parseVOP3OptionalOps(OperandVector &Operands);
Sam Koltondfa29f72016-03-09 12:29:31 +0000566
567 OperandMatchResultTy parseDPPCtrlOps(OperandVector &Operands);
568 OperandMatchResultTy parseDPPOptionalOps(OperandVector &Operands);
569 void cvtDPP_mod(MCInst &Inst, const OperandVector &Operands);
570 void cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands);
571 void cvtDPP(MCInst &Inst, const OperandVector &Operands, bool HasMods);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000572};
573
574struct OptionalOperand {
575 const char *Name;
576 AMDGPUOperand::ImmTy Type;
577 bool IsBit;
578 int64_t Default;
579 bool (*ConvertResult)(int64_t&);
580};
581
Alexander Kornienkof00654e2015-06-23 09:49:53 +0000582}
Tom Stellard45bb48e2015-06-13 03:28:10 +0000583
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000584static int getRegClass(RegisterKind Is, unsigned RegWidth) {
585 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000586 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +0000587 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000588 case 1: return AMDGPU::VGPR_32RegClassID;
589 case 2: return AMDGPU::VReg_64RegClassID;
590 case 3: return AMDGPU::VReg_96RegClassID;
591 case 4: return AMDGPU::VReg_128RegClassID;
592 case 8: return AMDGPU::VReg_256RegClassID;
593 case 16: return AMDGPU::VReg_512RegClassID;
594 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000595 } else if (Is == IS_TTMP) {
596 switch (RegWidth) {
597 default: return -1;
598 case 1: return AMDGPU::TTMP_32RegClassID;
599 case 2: return AMDGPU::TTMP_64RegClassID;
600 }
601 } else if (Is == IS_SGPR) {
602 switch (RegWidth) {
603 default: return -1;
604 case 1: return AMDGPU::SGPR_32RegClassID;
605 case 2: return AMDGPU::SGPR_64RegClassID;
606 case 4: return AMDGPU::SReg_128RegClassID;
607 case 8: return AMDGPU::SReg_256RegClassID;
608 case 16: return AMDGPU::SReg_512RegClassID;
609 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000610 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000611 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000612}
613
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000614static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000615 return StringSwitch<unsigned>(RegName)
616 .Case("exec", AMDGPU::EXEC)
617 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000618 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000619 .Case("m0", AMDGPU::M0)
620 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000621 .Case("tba", AMDGPU::TBA)
622 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +0000623 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
624 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000625 .Case("vcc_lo", AMDGPU::VCC_LO)
626 .Case("vcc_hi", AMDGPU::VCC_HI)
627 .Case("exec_lo", AMDGPU::EXEC_LO)
628 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +0000629 .Case("tma_lo", AMDGPU::TMA_LO)
630 .Case("tma_hi", AMDGPU::TMA_HI)
631 .Case("tba_lo", AMDGPU::TBA_LO)
632 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +0000633 .Default(0);
634}
635
636bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000637 auto R = parseRegister();
638 if (!R) return true;
639 assert(R->isReg());
640 RegNo = R->getReg();
641 StartLoc = R->getStartLoc();
642 EndLoc = R->getEndLoc();
643 return false;
644}
645
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000646bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
647{
648 switch (RegKind) {
649 case IS_SPECIAL:
650 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
651 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
652 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
653 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
654 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
655 return false;
656 case IS_VGPR:
657 case IS_SGPR:
658 case IS_TTMP:
659 if (Reg1 != Reg + RegWidth) { return false; }
660 RegWidth++;
661 return true;
662 default:
663 assert(false); return false;
664 }
665}
666
667bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
668{
669 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
670 if (getLexer().is(AsmToken::Identifier)) {
671 StringRef RegName = Parser.getTok().getString();
672 if ((Reg = getSpecialRegForName(RegName))) {
673 Parser.Lex();
674 RegKind = IS_SPECIAL;
675 } else {
676 unsigned RegNumIndex = 0;
677 if (RegName[0] == 'v') { RegNumIndex = 1; RegKind = IS_VGPR; }
678 else if (RegName[0] == 's') { RegNumIndex = 1; RegKind = IS_SGPR; }
679 else if (RegName.startswith("ttmp")) { RegNumIndex = strlen("ttmp"); RegKind = IS_TTMP; }
680 else { return false; }
681 if (RegName.size() > RegNumIndex) {
682 // Single 32-bit register: vXX.
683 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum)) { return false; }
684 Parser.Lex();
685 RegWidth = 1;
686 } else {
687 // Range of registers: v[XX:YY].
688 Parser.Lex();
689 int64_t RegLo, RegHi;
690 if (getLexer().isNot(AsmToken::LBrac)) { return false; }
691 Parser.Lex();
692
693 if (getParser().parseAbsoluteExpression(RegLo)) { return false; }
694
695 if (getLexer().isNot(AsmToken::Colon)) { return false; }
696 Parser.Lex();
697
698 if (getParser().parseAbsoluteExpression(RegHi)) { return false; }
699
700 if (getLexer().isNot(AsmToken::RBrac)) { return false; }
701 Parser.Lex();
702
703 RegNum = (unsigned) RegLo;
704 RegWidth = (RegHi - RegLo) + 1;
705 }
706 }
707 } else if (getLexer().is(AsmToken::LBrac)) {
708 // List of consecutive registers: [s0,s1,s2,s3]
709 Parser.Lex();
710 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) { return false; }
711 if (RegWidth != 1) { return false; }
712 RegisterKind RegKind1;
713 unsigned Reg1, RegNum1, RegWidth1;
714 do {
715 if (getLexer().is(AsmToken::Comma)) {
716 Parser.Lex();
717 } else if (getLexer().is(AsmToken::RBrac)) {
718 Parser.Lex();
719 break;
720 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
721 if (RegWidth1 != 1) { return false; }
722 if (RegKind1 != RegKind) { return false; }
723 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) { return false; }
724 } else {
725 return false;
726 }
727 } while (true);
728 } else {
729 return false;
730 }
731 switch (RegKind) {
732 case IS_SPECIAL:
733 RegNum = 0;
734 RegWidth = 1;
735 break;
736 case IS_VGPR:
737 case IS_SGPR:
738 case IS_TTMP:
739 {
740 unsigned Size = 1;
741 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
742 // SGPR and TTMP registers must be are aligned. Max required alignment is 4 dwords.
743 Size = std::min(RegWidth, 4u);
744 }
745 if (RegNum % Size != 0) { return false; }
746 RegNum = RegNum / Size;
747 int RCID = getRegClass(RegKind, RegWidth);
748 if (RCID == -1) { return false; }
749 const MCRegisterClass RC = TRI->getRegClass(RCID);
750 if (RegNum >= RC.getNumRegs()) { return false; }
751 Reg = RC.getRegister(RegNum);
752 break;
753 }
754
755 default:
756 assert(false); return false;
757 }
758
759 if (!subtargetHasRegister(*TRI, Reg)) { return false; }
760 return true;
761}
762
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000763std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000764 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000765 SMLoc StartLoc = Tok.getLoc();
766 SMLoc EndLoc = Tok.getEndLoc();
Matt Arsenault3b159672015-12-01 20:31:08 +0000767 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
768
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000769 RegisterKind RegKind;
770 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000771
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000772 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
773 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000774 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +0000775 return AMDGPUOperand::CreateReg(Reg, StartLoc, EndLoc,
Valery Pykhtin0f97f172016-03-14 07:43:42 +0000776 TRI, &getSTI(), false);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000777}
778
779unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
780
781 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
782
783 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
784 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)))
785 return Match_InvalidOperand;
786
Tom Stellard88e0b252015-10-06 15:57:53 +0000787 if ((TSFlags & SIInstrFlags::VOP3) &&
788 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
789 getForcedEncodingSize() != 64)
790 return Match_PreferE32;
791
Tom Stellard45bb48e2015-06-13 03:28:10 +0000792 return Match_Success;
793}
794
795
796bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
797 OperandVector &Operands,
798 MCStreamer &Out,
799 uint64_t &ErrorInfo,
800 bool MatchingInlineAsm) {
801 MCInst Inst;
802
Ranjeet Singh86ecbb72015-06-30 12:32:53 +0000803 switch (MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000804 default: break;
805 case Match_Success:
806 Inst.setLoc(IDLoc);
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000807 Out.EmitInstruction(Inst, getSTI());
Tom Stellard45bb48e2015-06-13 03:28:10 +0000808 return false;
809 case Match_MissingFeature:
810 return Error(IDLoc, "instruction not supported on this GPU");
811
812 case Match_MnemonicFail:
813 return Error(IDLoc, "unrecognized instruction mnemonic");
814
815 case Match_InvalidOperand: {
816 SMLoc ErrorLoc = IDLoc;
817 if (ErrorInfo != ~0ULL) {
818 if (ErrorInfo >= Operands.size()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000819 return Error(IDLoc, "too few operands for instruction");
820 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000821 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
822 if (ErrorLoc == SMLoc())
823 ErrorLoc = IDLoc;
824 }
825 return Error(ErrorLoc, "invalid operand for instruction");
826 }
Tom Stellard88e0b252015-10-06 15:57:53 +0000827 case Match_PreferE32:
828 return Error(IDLoc, "internal error: instruction without _e64 suffix "
829 "should be encoded as e32");
Tom Stellard45bb48e2015-06-13 03:28:10 +0000830 }
831 llvm_unreachable("Implement any new match types added!");
832}
833
Tom Stellard347ac792015-06-26 21:15:07 +0000834bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
835 uint32_t &Minor) {
836 if (getLexer().isNot(AsmToken::Integer))
837 return TokError("invalid major version");
838
839 Major = getLexer().getTok().getIntVal();
840 Lex();
841
842 if (getLexer().isNot(AsmToken::Comma))
843 return TokError("minor version number required, comma expected");
844 Lex();
845
846 if (getLexer().isNot(AsmToken::Integer))
847 return TokError("invalid minor version");
848
849 Minor = getLexer().getTok().getIntVal();
850 Lex();
851
852 return false;
853}
854
855bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
856
857 uint32_t Major;
858 uint32_t Minor;
859
860 if (ParseDirectiveMajorMinor(Major, Minor))
861 return true;
862
863 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
864 return false;
865}
866
867bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
868
869 uint32_t Major;
870 uint32_t Minor;
871 uint32_t Stepping;
872 StringRef VendorName;
873 StringRef ArchName;
874
875 // If this directive has no arguments, then use the ISA version for the
876 // targeted GPU.
877 if (getLexer().is(AsmToken::EndOfStatement)) {
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000878 AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
Tom Stellard347ac792015-06-26 21:15:07 +0000879 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
880 Isa.Stepping,
881 "AMD", "AMDGPU");
882 return false;
883 }
884
885
886 if (ParseDirectiveMajorMinor(Major, Minor))
887 return true;
888
889 if (getLexer().isNot(AsmToken::Comma))
890 return TokError("stepping version number required, comma expected");
891 Lex();
892
893 if (getLexer().isNot(AsmToken::Integer))
894 return TokError("invalid stepping version");
895
896 Stepping = getLexer().getTok().getIntVal();
897 Lex();
898
899 if (getLexer().isNot(AsmToken::Comma))
900 return TokError("vendor name required, comma expected");
901 Lex();
902
903 if (getLexer().isNot(AsmToken::String))
904 return TokError("invalid vendor name");
905
906 VendorName = getLexer().getTok().getStringContents();
907 Lex();
908
909 if (getLexer().isNot(AsmToken::Comma))
910 return TokError("arch name required, comma expected");
911 Lex();
912
913 if (getLexer().isNot(AsmToken::String))
914 return TokError("invalid arch name");
915
916 ArchName = getLexer().getTok().getStringContents();
917 Lex();
918
919 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
920 VendorName, ArchName);
921 return false;
922}
923
Tom Stellardff7416b2015-06-26 21:58:31 +0000924bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
925 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +0000926 SmallString<40> ErrStr;
927 raw_svector_ostream Err(ErrStr);
928 if (!parseAmdKernelCodeField(ID, getLexer(), Header, Err)) {
929 return TokError(Err.str());
930 }
Tom Stellardff7416b2015-06-26 21:58:31 +0000931 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +0000932 return false;
933}
934
935bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
936
937 amd_kernel_code_t Header;
Akira Hatanakabd9fc282015-11-14 05:20:05 +0000938 AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +0000939
940 while (true) {
941
942 if (getLexer().isNot(AsmToken::EndOfStatement))
943 return TokError("amd_kernel_code_t values must begin on a new line");
944
945 // Lex EndOfStatement. This is in a while loop, because lexing a comment
946 // will set the current token to EndOfStatement.
947 while(getLexer().is(AsmToken::EndOfStatement))
948 Lex();
949
950 if (getLexer().isNot(AsmToken::Identifier))
951 return TokError("expected value identifier or .end_amd_kernel_code_t");
952
953 StringRef ID = getLexer().getTok().getIdentifier();
954 Lex();
955
956 if (ID == ".end_amd_kernel_code_t")
957 break;
958
959 if (ParseAMDKernelCodeTValue(ID, Header))
960 return true;
961 }
962
963 getTargetStreamer().EmitAMDKernelCodeT(Header);
964
965 return false;
966}
967
Tom Stellarde135ffd2015-09-25 21:41:28 +0000968bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
969 getParser().getStreamer().SwitchSection(
970 AMDGPU::getHSATextSection(getContext()));
971 return false;
972}
973
Tom Stellard1e1b05d2015-11-06 11:45:14 +0000974bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
975 if (getLexer().isNot(AsmToken::Identifier))
976 return TokError("expected symbol name");
977
978 StringRef KernelName = Parser.getTok().getString();
979
980 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
981 ELF::STT_AMDGPU_HSA_KERNEL);
982 Lex();
983 return false;
984}
985
Tom Stellard00f2f912015-12-02 19:47:57 +0000986bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
987 if (getLexer().isNot(AsmToken::Identifier))
988 return TokError("expected symbol name");
989
990 StringRef GlobalName = Parser.getTok().getIdentifier();
991
992 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
993 Lex();
994 return false;
995}
996
997bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
998 if (getLexer().isNot(AsmToken::Identifier))
999 return TokError("expected symbol name");
1000
1001 StringRef GlobalName = Parser.getTok().getIdentifier();
1002
1003 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1004 Lex();
1005 return false;
1006}
1007
1008bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1009 getParser().getStreamer().SwitchSection(
1010 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1011 return false;
1012}
1013
1014bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1015 getParser().getStreamer().SwitchSection(
1016 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1017 return false;
1018}
1019
Tom Stellard9760f032015-12-03 03:34:32 +00001020bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1021 getParser().getStreamer().SwitchSection(
1022 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1023 return false;
1024}
1025
Tom Stellard45bb48e2015-06-13 03:28:10 +00001026bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001027 StringRef IDVal = DirectiveID.getString();
1028
1029 if (IDVal == ".hsa_code_object_version")
1030 return ParseDirectiveHSACodeObjectVersion();
1031
1032 if (IDVal == ".hsa_code_object_isa")
1033 return ParseDirectiveHSACodeObjectISA();
1034
Tom Stellardff7416b2015-06-26 21:58:31 +00001035 if (IDVal == ".amd_kernel_code_t")
1036 return ParseDirectiveAMDKernelCodeT();
1037
Tom Stellarde135ffd2015-09-25 21:41:28 +00001038 if (IDVal == ".hsatext" || IDVal == ".text")
1039 return ParseSectionDirectiveHSAText();
1040
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001041 if (IDVal == ".amdgpu_hsa_kernel")
1042 return ParseDirectiveAMDGPUHsaKernel();
1043
Tom Stellard00f2f912015-12-02 19:47:57 +00001044 if (IDVal == ".amdgpu_hsa_module_global")
1045 return ParseDirectiveAMDGPUHsaModuleGlobal();
1046
1047 if (IDVal == ".amdgpu_hsa_program_global")
1048 return ParseDirectiveAMDGPUHsaProgramGlobal();
1049
1050 if (IDVal == ".hsadata_global_agent")
1051 return ParseSectionDirectiveHSADataGlobalAgent();
1052
1053 if (IDVal == ".hsadata_global_program")
1054 return ParseSectionDirectiveHSADataGlobalProgram();
1055
Tom Stellard9760f032015-12-03 03:34:32 +00001056 if (IDVal == ".hsarodata_readonly_agent")
1057 return ParseSectionDirectiveHSARodataReadonlyAgent();
1058
Tom Stellard45bb48e2015-06-13 03:28:10 +00001059 return true;
1060}
1061
Matt Arsenault68802d32015-11-05 03:11:27 +00001062bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1063 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001064 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001065 return true;
1066
Matt Arsenault3b159672015-12-01 20:31:08 +00001067 if (isSI()) {
1068 // No flat_scr
1069 switch (RegNo) {
1070 case AMDGPU::FLAT_SCR:
1071 case AMDGPU::FLAT_SCR_LO:
1072 case AMDGPU::FLAT_SCR_HI:
1073 return false;
1074 default:
1075 return true;
1076 }
1077 }
1078
Matt Arsenault68802d32015-11-05 03:11:27 +00001079 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1080 // SI/CI have.
1081 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1082 R.isValid(); ++R) {
1083 if (*R == RegNo)
1084 return false;
1085 }
1086
1087 return true;
1088}
1089
Tom Stellard45bb48e2015-06-13 03:28:10 +00001090static bool operandsHaveModifiers(const OperandVector &Operands) {
1091
1092 for (unsigned i = 0, e = Operands.size(); i != e; ++i) {
1093 const AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
1094 if (Op.isRegKind() && Op.hasModifiers())
1095 return true;
Tom Stellardd93a34f2016-02-22 19:17:56 +00001096 if (Op.isImm() && Op.hasModifiers())
1097 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001098 if (Op.isImm() && (Op.getImmTy() == AMDGPUOperand::ImmTyOMod ||
1099 Op.getImmTy() == AMDGPUOperand::ImmTyClamp))
1100 return true;
1101 }
1102 return false;
1103}
1104
1105AMDGPUAsmParser::OperandMatchResultTy
1106AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
1107
1108 // Try to parse with a custom parser
1109 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
1110
1111 // If we successfully parsed the operand or if there as an error parsing,
1112 // we are done.
1113 //
1114 // If we are parsing after we reach EndOfStatement then this means we
1115 // are appending default values to the Operands list. This is only done
1116 // by custom parser, so we shouldn't continue on to the generic parsing.
Tom Stellarda90b9522016-02-11 03:28:15 +00001117 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail||
Tom Stellard45bb48e2015-06-13 03:28:10 +00001118 getLexer().is(AsmToken::EndOfStatement))
1119 return ResTy;
1120
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001121 bool Negate = false, Abs = false, Abs2 = false;
1122
Tom Stellard45bb48e2015-06-13 03:28:10 +00001123 if (getLexer().getKind()== AsmToken::Minus) {
1124 Parser.Lex();
1125 Negate = true;
1126 }
1127
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001128 if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
1129 Parser.Lex();
1130 Abs2 = true;
1131 if (getLexer().isNot(AsmToken::LParen)) {
1132 Error(Parser.getTok().getLoc(), "expected left paren after abs");
1133 return MatchOperand_ParseFail;
1134 }
1135 Parser.Lex();
1136 }
1137
Tom Stellard45bb48e2015-06-13 03:28:10 +00001138 if (getLexer().getKind() == AsmToken::Pipe) {
1139 Parser.Lex();
1140 Abs = true;
1141 }
1142
1143 switch(getLexer().getKind()) {
1144 case AsmToken::Integer: {
1145 SMLoc S = Parser.getTok().getLoc();
1146 int64_t IntVal;
1147 if (getParser().parseAbsoluteExpression(IntVal))
1148 return MatchOperand_ParseFail;
Matt Arsenault382557e2015-10-23 18:07:58 +00001149 if (!isInt<32>(IntVal) && !isUInt<32>(IntVal)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001150 Error(S, "invalid immediate: only 32-bit values are legal");
1151 return MatchOperand_ParseFail;
1152 }
1153
Tom Stellard45bb48e2015-06-13 03:28:10 +00001154 if (Negate)
1155 IntVal *= -1;
1156 Operands.push_back(AMDGPUOperand::CreateImm(IntVal, S));
1157 return MatchOperand_Success;
1158 }
1159 case AsmToken::Real: {
1160 // FIXME: We should emit an error if a double precisions floating-point
1161 // value is used. I'm not sure the best way to detect this.
1162 SMLoc S = Parser.getTok().getLoc();
1163 int64_t IntVal;
1164 if (getParser().parseAbsoluteExpression(IntVal))
1165 return MatchOperand_ParseFail;
1166
1167 APFloat F((float)BitsToDouble(IntVal));
1168 if (Negate)
1169 F.changeSign();
1170 Operands.push_back(
1171 AMDGPUOperand::CreateImm(F.bitcastToAPInt().getZExtValue(), S));
1172 return MatchOperand_Success;
1173 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001174 case AsmToken::LBrac:
Tom Stellard45bb48e2015-06-13 03:28:10 +00001175 case AsmToken::Identifier: {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001176 if (auto R = parseRegister()) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001177 unsigned Modifiers = 0;
1178
1179 if (Negate)
1180 Modifiers |= 0x1;
1181
1182 if (Abs) {
1183 if (getLexer().getKind() != AsmToken::Pipe)
1184 return MatchOperand_ParseFail;
1185 Parser.Lex();
1186 Modifiers |= 0x2;
1187 }
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00001188 if (Abs2) {
1189 if (getLexer().isNot(AsmToken::RParen)) {
1190 return MatchOperand_ParseFail;
1191 }
1192 Parser.Lex();
1193 Modifiers |= 0x2;
1194 }
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001195 assert(R->isReg());
1196 R->Reg.IsForcedVOP3 = isForcedVOP3();
Tom Stellarda90b9522016-02-11 03:28:15 +00001197 if (Modifiers) {
Valery Pykhtin9e33c7f2016-03-14 05:25:44 +00001198 R->setModifiers(Modifiers);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001199 }
Valery Pykhtin9e33c7f2016-03-14 05:25:44 +00001200 Operands.push_back(std::move(R));
Tom Stellarda90b9522016-02-11 03:28:15 +00001201 } else {
1202 ResTy = parseVOP3OptionalOps(Operands);
1203 if (ResTy == MatchOperand_NoMatch) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001204 const auto &Tok = Parser.getTok();
1205 Operands.push_back(AMDGPUOperand::CreateToken(Tok.getString(),
1206 Tok.getLoc()));
Tom Stellarda90b9522016-02-11 03:28:15 +00001207 Parser.Lex();
1208 }
1209 }
1210 return MatchOperand_Success;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001211 }
1212 default:
1213 return MatchOperand_NoMatch;
1214 }
1215}
1216
1217bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
1218 StringRef Name,
1219 SMLoc NameLoc, OperandVector &Operands) {
1220
1221 // Clear any forced encodings from the previous instruction.
1222 setForcedEncodingSize(0);
1223
1224 if (Name.endswith("_e64"))
1225 setForcedEncodingSize(64);
1226 else if (Name.endswith("_e32"))
1227 setForcedEncodingSize(32);
1228
1229 // Add the instruction mnemonic
1230 Operands.push_back(AMDGPUOperand::CreateToken(Name, NameLoc));
1231
1232 while (!getLexer().is(AsmToken::EndOfStatement)) {
1233 AMDGPUAsmParser::OperandMatchResultTy Res = parseOperand(Operands, Name);
1234
1235 // Eat the comma or space if there is one.
1236 if (getLexer().is(AsmToken::Comma))
1237 Parser.Lex();
1238
1239 switch (Res) {
1240 case MatchOperand_Success: break;
1241 case MatchOperand_ParseFail: return Error(getLexer().getLoc(),
1242 "failed parsing operand.");
1243 case MatchOperand_NoMatch: return Error(getLexer().getLoc(),
1244 "not a valid operand.");
1245 }
1246 }
1247
Tom Stellard45bb48e2015-06-13 03:28:10 +00001248 return false;
1249}
1250
1251//===----------------------------------------------------------------------===//
1252// Utility functions
1253//===----------------------------------------------------------------------===//
1254
1255AMDGPUAsmParser::OperandMatchResultTy
1256AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int,
1257 int64_t Default) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001258 // We are at the end of the statement, and this is a default argument, so
1259 // use a default value.
1260 if (getLexer().is(AsmToken::EndOfStatement)) {
1261 Int = Default;
1262 return MatchOperand_Success;
1263 }
1264
1265 switch(getLexer().getKind()) {
1266 default: return MatchOperand_NoMatch;
1267 case AsmToken::Identifier: {
1268 StringRef OffsetName = Parser.getTok().getString();
1269 if (!OffsetName.equals(Prefix))
1270 return MatchOperand_NoMatch;
1271
1272 Parser.Lex();
1273 if (getLexer().isNot(AsmToken::Colon))
1274 return MatchOperand_ParseFail;
1275
1276 Parser.Lex();
1277 if (getLexer().isNot(AsmToken::Integer))
1278 return MatchOperand_ParseFail;
1279
1280 if (getParser().parseAbsoluteExpression(Int))
1281 return MatchOperand_ParseFail;
1282 break;
1283 }
1284 }
1285 return MatchOperand_Success;
1286}
1287
1288AMDGPUAsmParser::OperandMatchResultTy
1289AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
1290 enum AMDGPUOperand::ImmTy ImmTy) {
1291
1292 SMLoc S = Parser.getTok().getLoc();
1293 int64_t Offset = 0;
1294
1295 AMDGPUAsmParser::OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Offset);
1296 if (Res != MatchOperand_Success)
1297 return Res;
1298
1299 Operands.push_back(AMDGPUOperand::CreateImm(Offset, S, ImmTy));
1300 return MatchOperand_Success;
1301}
1302
1303AMDGPUAsmParser::OperandMatchResultTy
1304AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
1305 enum AMDGPUOperand::ImmTy ImmTy) {
1306 int64_t Bit = 0;
1307 SMLoc S = Parser.getTok().getLoc();
1308
1309 // We are at the end of the statement, and this is a default argument, so
1310 // use a default value.
1311 if (getLexer().isNot(AsmToken::EndOfStatement)) {
1312 switch(getLexer().getKind()) {
1313 case AsmToken::Identifier: {
1314 StringRef Tok = Parser.getTok().getString();
1315 if (Tok == Name) {
1316 Bit = 1;
1317 Parser.Lex();
1318 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
1319 Bit = 0;
1320 Parser.Lex();
1321 } else {
1322 return MatchOperand_NoMatch;
1323 }
1324 break;
1325 }
1326 default:
1327 return MatchOperand_NoMatch;
1328 }
1329 }
1330
1331 Operands.push_back(AMDGPUOperand::CreateImm(Bit, S, ImmTy));
1332 return MatchOperand_Success;
1333}
1334
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001335typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1336
Sam Koltona74cd522016-03-18 15:35:51 +00001337void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1338 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001339 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001340 auto i = OptionalIdx.find(ImmT);
1341 if (i != OptionalIdx.end()) {
1342 unsigned Idx = i->second;
1343 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1344 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001345 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001346 }
1347}
1348
Tom Stellard45bb48e2015-06-13 03:28:10 +00001349static bool operandsHasOptionalOp(const OperandVector &Operands,
1350 const OptionalOperand &OOp) {
1351 for (unsigned i = 0; i < Operands.size(); i++) {
1352 const AMDGPUOperand &ParsedOp = ((const AMDGPUOperand &)*Operands[i]);
1353 if ((ParsedOp.isImm() && ParsedOp.getImmTy() == OOp.Type) ||
1354 (ParsedOp.isToken() && ParsedOp.getToken() == OOp.Name))
1355 return true;
1356
1357 }
1358 return false;
1359}
1360
1361AMDGPUAsmParser::OperandMatchResultTy
1362AMDGPUAsmParser::parseOptionalOps(const ArrayRef<OptionalOperand> &OptionalOps,
1363 OperandVector &Operands) {
1364 SMLoc S = Parser.getTok().getLoc();
1365 for (const OptionalOperand &Op : OptionalOps) {
1366 if (operandsHasOptionalOp(Operands, Op))
1367 continue;
1368 AMDGPUAsmParser::OperandMatchResultTy Res;
1369 int64_t Value;
1370 if (Op.IsBit) {
1371 Res = parseNamedBit(Op.Name, Operands, Op.Type);
1372 if (Res == MatchOperand_NoMatch)
1373 continue;
1374 return Res;
1375 }
1376
1377 Res = parseIntWithPrefix(Op.Name, Value, Op.Default);
1378
1379 if (Res == MatchOperand_NoMatch)
1380 continue;
1381
1382 if (Res != MatchOperand_Success)
1383 return Res;
1384
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001385 bool DefaultValue = (Value == Op.Default);
1386
Tom Stellard45bb48e2015-06-13 03:28:10 +00001387 if (Op.ConvertResult && !Op.ConvertResult(Value)) {
1388 return MatchOperand_ParseFail;
1389 }
1390
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001391 if (!DefaultValue) {
1392 Operands.push_back(AMDGPUOperand::CreateImm(Value, S, Op.Type));
1393 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001394 return MatchOperand_Success;
1395 }
1396 return MatchOperand_NoMatch;
1397}
1398
1399//===----------------------------------------------------------------------===//
1400// ds
1401//===----------------------------------------------------------------------===//
1402
1403static const OptionalOperand DSOptionalOps [] = {
1404 {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
1405 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
1406};
1407
1408static const OptionalOperand DSOptionalOpsOff01 [] = {
1409 {"offset0", AMDGPUOperand::ImmTyDSOffset0, false, 0, nullptr},
1410 {"offset1", AMDGPUOperand::ImmTyDSOffset1, false, 0, nullptr},
1411 {"gds", AMDGPUOperand::ImmTyGDS, true, 0, nullptr}
1412};
1413
1414AMDGPUAsmParser::OperandMatchResultTy
1415AMDGPUAsmParser::parseDSOptionalOps(OperandVector &Operands) {
1416 return parseOptionalOps(DSOptionalOps, Operands);
1417}
1418AMDGPUAsmParser::OperandMatchResultTy
1419AMDGPUAsmParser::parseDSOff01OptionalOps(OperandVector &Operands) {
1420 return parseOptionalOps(DSOptionalOpsOff01, Operands);
1421}
1422
1423AMDGPUAsmParser::OperandMatchResultTy
1424AMDGPUAsmParser::parseDSOffsetOptional(OperandVector &Operands) {
1425 SMLoc S = Parser.getTok().getLoc();
1426 AMDGPUAsmParser::OperandMatchResultTy Res =
1427 parseIntWithPrefix("offset", Operands, AMDGPUOperand::ImmTyOffset);
1428 if (Res == MatchOperand_NoMatch) {
1429 Operands.push_back(AMDGPUOperand::CreateImm(0, S,
1430 AMDGPUOperand::ImmTyOffset));
1431 Res = MatchOperand_Success;
1432 }
1433 return Res;
1434}
1435
1436bool AMDGPUOperand::isDSOffset() const {
1437 return isImm() && isUInt<16>(getImm());
1438}
1439
1440bool AMDGPUOperand::isDSOffset01() const {
1441 return isImm() && isUInt<8>(getImm());
1442}
1443
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDSOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
  std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
  bool GDSOnly = false;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    if (Op.isToken() && Op.getToken() == "gds") {
      GDSOnly = true;
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);

  if (!GDSOnly) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
  }
  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}

//===----------------------------------------------------------------------===//
// s_waitcnt
//===----------------------------------------------------------------------===//

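// Parse one counter specification of the form "name(value)", where name is
// vmcnt, expcnt or lgkmcnt, and fold it into the s_waitcnt immediate, e.g.
// (illustrative) "s_waitcnt vmcnt(0) & lgkmcnt(0)". Counters that are not
// mentioned keep their all-ones (disabled) field value.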
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  int CntShift;
  int CntMask;

  if (CntName == "vmcnt") {
    CntMask = 0xf;
    CntShift = 0;
  } else if (CntName == "expcnt") {
    CntMask = 0x7;
    CntShift = 4;
  } else if (CntName == "lgkmcnt") {
    CntMask = 0xf;
    CntShift = 8;
  } else {
    return true;
  }

  IntVal &= ~(CntMask << CntShift);
  IntVal |= (CntVal << CntShift);
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  // Disable all counters by default.
  // vmcnt [3:0]
  // expcnt [6:4]
  // lgkmcnt [11:8]
  int64_t CntVal = 0xf7f;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(CntVal))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    do {
      if (parseCnt(CntVal))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(CntVal, S));
  return MatchOperand_Success;
}

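// Parse the hwreg(id [, offset, width]) syntax accepted by s_getreg/s_setreg,
// e.g. (illustrative) "s_getreg_b32 s2, hwreg(1, 0, 32)". The offset and
// width arguments are optional and default to 0 and 32 respectively.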
bool AMDGPUAsmParser::parseHwreg(int64_t &HwRegCode, int64_t &Offset, int64_t &Width) {
  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(HwRegCode))
    return true;

  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseHwregOp(OperandVector &Operands) {
  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_ParseFail;
    if (!isInt<16>(Imm16Val) && !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return an error code; create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;

  case AsmToken::Identifier: {
    int64_t HwRegCode = 0;
    int64_t Offset = 0; // default
    int64_t Width = 32; // default
    if (parseHwreg(HwRegCode, Offset, Width))
      return MatchOperand_ParseFail;
    // HwRegCode (6) [5:0]
    // Offset (5) [10:6]
    // WidthMinusOne (5) [15:11]
    if (HwRegCode < 0 || HwRegCode > 63)
      Error(S, "invalid code of hardware register: only 6-bit values are legal");
    if (Offset < 0 || Offset > 31)
      Error(S, "invalid bit offset: only 5-bit values are legal");
    if (Width < 1 || Width > 32)
      Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
    Imm16Val = HwRegCode | (Offset << 6) | ((Width - 1) << 11);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

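// A sopp branch target is either an integer expression or a symbol, e.g.
// (illustrative) "s_branch BB0_4", which is emitted as a relocatable
// expression.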
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    Operands.push_back(AMDGPUOperand::CreateExpr(
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
            Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// flat
//===----------------------------------------------------------------------===//

static const OptionalOperand FlatOptionalOps [] = {
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

static const OptionalOperand FlatAtomicOptionalOps [] = {
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseFlatAtomicOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(FlatAtomicOptionalOps, Operands);
}

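// Convert parsed operands for FLAT instructions, e.g. (illustrative)
// "flat_load_dword v0, v[2:3] glc slc". Registers are emitted first, then
// the glc, slc and tfe bits in that fixed order, each defaulting to 0.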
void AMDGPUAsmParser::cvtFlat(MCInst &Inst,
                              const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

void AMDGPUAsmParser::cvtFlatAtomic(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle 'glc' token for flat atomics.
    if (Op.isToken()) {
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

static const OptionalOperand MubufOptionalOps [] = {
  {"offset", AMDGPUOperand::ImmTyOffset, false, 0, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, 0, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, 0, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, 0, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseMubufOptionalOps(OperandVector &Operands) {
  return parseOptionalOps(MubufOptionalOps, Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseOffset(OperandVector &Operands) {
  return parseIntWithPrefix("offset", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseGLC(OperandVector &Operands) {
  return parseNamedBit("glc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseSLC(OperandVector &Operands) {
  return parseNamedBit("slc", Operands);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseTFE(OperandVector &Operands) {
  return parseNamedBit("tfe", Operands);
}

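// MUBUF offsets are unsigned 12-bit byte offsets, so (illustrative)
// "buffer_load_dword v0, s[0:3], s4 offset:4095" uses the largest offset
// this predicate accepts.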
bool AMDGPUOperand::isMubufOffset() const {
  return isImmTy(ImmTyOffset) && isUInt<12>(getImm());
}

void AMDGPUAsmParser::cvtMubuf(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDMask(OperandVector &Operands) {
  return parseIntWithPrefix("dmask", Operands, AMDGPUOperand::ImmTyDMask);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseUNorm(OperandVector &Operands) {
  return parseNamedBit("unorm", Operands, AMDGPUOperand::ImmTyUNorm);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDA(OperandVector &Operands) {
  return parseNamedBit("da", Operands, AMDGPUOperand::ImmTyDA);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseR128(OperandVector &Operands) {
  return parseNamedBit("r128", Operands, AMDGPUOperand::ImmTyR128);
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseLWE(OperandVector &Operands) {
  return parseNamedBit("lwe", Operands, AMDGPUOperand::ImmTyLWE);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset() const {
  // FIXME: Support 20-bit offsets on VI. We need to pass subtarget
  // information here.
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

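// The output-modifier (omod) field encodes 0 = none, 1 = multiply by 2,
// 2 = multiply by 4 and 3 = divide by 2. The converters below map the asm
// spellings "mul:2", "mul:4" and "div:2" onto that encoding.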
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static const OptionalOperand VOP3OptionalOps [] = {
  {"clamp", AMDGPUOperand::ImmTyClamp, true, 0, nullptr},
  {"mul", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodMul},
  {"div", AMDGPUOperand::ImmTyOMod, false, 1, ConvertOmodDiv},
};

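// Heuristic for whether an instruction must use the 64-bit VOP3 encoding:
// source modifiers, an SGPR_64 destination, five or more operands, or an
// SGPR in src1 all force VOP3.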
static bool isVOP3(OperandVector &Operands) {
  if (operandsHaveModifiers(Operands))
    return true;

  if (Operands.size() >= 2) {
    AMDGPUOperand &DstOp = ((AMDGPUOperand&)*Operands[1]);

    if (DstOp.isRegClass(AMDGPU::SGPR_64RegClassID))
      return true;
  }

  if (Operands.size() >= 5)
    return true;

  if (Operands.size() > 3) {
    AMDGPUOperand &Src1Op = ((AMDGPUOperand&)*Operands[3]);
    if (Src1Op.isRegClass(AMDGPU::SReg_32RegClassID) ||
        Src1Op.isRegClass(AMDGPU::SReg_64RegClassID))
      return true;
  }
  return false;
}

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseVOP3OptionalOps(OperandVector &Operands) {
  // The value returned by this function may change after parsing
  // an operand so store the original value here.
  bool HasModifiers = operandsHaveModifiers(Operands);

  bool IsVOP3 = isVOP3(Operands);
  if (HasModifiers || IsVOP3 ||
      getLexer().isNot(AsmToken::EndOfStatement) ||
      getForcedEncodingSize() == 64) {

    AMDGPUAsmParser::OperandMatchResultTy Res =
        parseOptionalOps(VOP3OptionalOps, Operands);

    if (!HasModifiers && Res == MatchOperand_Success) {
      // We have added a modifier operation, so we need to make sure all
      // previous register operands have modifiers
      for (unsigned i = 2, e = Operands.size(); i != e; ++i) {
        AMDGPUOperand &Op = ((AMDGPUOperand&)*Operands[i]);
        if ((Op.isReg() || Op.isImm()) && !Op.hasModifiers())
          Op.setModifiers(0);
      }
    }
    return Res;
  }
  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }
  for (unsigned E = Operands.size(); I != E; ++I)
    ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
}

void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
  if (TSFlags & SIInstrFlags::VOP3) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_2_nomod(MCInst &Inst, const OperandVector &Operands) {
  if (operandsHaveModifiers(Operands)) {
    cvtVOP3(Inst, Operands);
  } else {
    cvtId(Inst, Operands);
  }
}

void AMDGPUAsmParser::cvtVOP3_only(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3(Inst, Operands);
}

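// Convert parsed operands to a VOP3 MCInst: defs first, then each source as
// a (modifiers, value) pair, and finally the optional clamp and omod
// immediates, which default to 0 when they were not written.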
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClamp);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOMod);
}

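// Convert parsed operands for MIMG instructions, e.g. (illustrative)
// "image_load v[0:3], v[4:7], s[0:7] dmask:0xf unorm glc". After the
// register operands, the modifiers are appended in the fixed order dmask,
// unorm, glc, da, r128, tfe, lwe, slc.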
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}

//===----------------------------------------------------------------------===//
// dpp
//===----------------------------------------------------------------------===//

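// Check that an immediate is a legal dpp_ctrl encoding. Legal values are
// quad_perm (0x000-0x0ff), row_shl (0x101-0x10f), row_shr (0x111-0x11f),
// row_ror (0x121-0x12f), wave_shl (0x130), wave_rol (0x134), wave_shr
// (0x138), wave_ror (0x13c), row_mirror (0x140), row_half_mirror (0x141)
// and row_bcast (0x142, 0x143).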
bool AMDGPUOperand::isDPPCtrl() const {
  bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
  if (result) {
    int64_t Imm = getImm();
    return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
           ((Imm >= 0x101) && (Imm <= 0x10f)) ||
           ((Imm >= 0x111) && (Imm <= 0x11f)) ||
           ((Imm >= 0x121) && (Imm <= 0x12f)) ||
           (Imm == 0x130) ||
           (Imm == 0x134) ||
           (Imm == 0x138) ||
           (Imm == 0x13c) ||
           (Imm == 0x140) ||
           (Imm == 0x141) ||
           (Imm == 0x142) ||
           (Imm == 0x143);
  }
  return false;
}

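// Parse the dpp_ctrl operand, e.g. (illustrative)
// "v_mov_b32_dpp v0, v1 quad_perm:[0,1,2,3] row_mask:0xf bank_mask:0xf".
// Each named control is folded into the 9-bit dpp_ctrl value validated by
// isDPPCtrl() above.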
AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrlOps(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  if (Prefix == "row_mirror") {
    Int = 0x140;
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 2);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 4);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int += (getLexer().getTok().getIntVal() << 6);

      Parser.Lex();
      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;

    } else {
      // sel:%d
      Parser.Lex();
      if (getLexer().isNot(AsmToken::Integer))
        return MatchOperand_ParseFail;
      Int = getLexer().getTok().getIntVal();

      if (Prefix == "row_shl") {
        Int |= 0x100;
      } else if (Prefix == "row_shr") {
        Int |= 0x110;
      } else if (Prefix == "row_ror") {
        Int |= 0x120;
      } else if (Prefix == "wave_shl") {
        Int = 0x130;
      } else if (Prefix == "wave_rol") {
        Int = 0x134;
      } else if (Prefix == "wave_shr") {
        Int = 0x138;
      } else if (Prefix == "wave_ror") {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }
  Parser.Lex(); // eat last token

  Operands.push_back(AMDGPUOperand::CreateImm(Int, S,
                                              AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}

static const OptionalOperand DPPOptionalOps [] = {
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, 0xf, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, 0xf, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, -1, nullptr}
};

AMDGPUAsmParser::OperandMatchResultTy
AMDGPUAsmParser::parseDPPOptionalOps(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  OperandMatchResultTy Res = parseOptionalOps(DPPOptionalOps, Operands);
  // XXX - sp3 uses the syntax "bound_ctrl:0" to indicate that the bound_ctrl
  // bit is set.
  if (Res == MatchOperand_Success) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands.back());
    // If the last operand was parsed as bound_ctrl, replace it with the
    // correct value (1).
    if (Op.isImmTy(AMDGPUOperand::ImmTyDppBoundCtrl)) {
      Operands.pop_back();
      Operands.push_back(
          AMDGPUOperand::CreateImm(1, S, AMDGPUOperand::ImmTyDppBoundCtrl));
      return MatchOperand_Success;
    }
  }
  return Res;
}

void AMDGPUAsmParser::cvtDPP_mod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, true);
}

void AMDGPUAsmParser::cvtDPP_nomod(MCInst &Inst, const OperandVector &Operands) {
  cvtDPP(Inst, Operands, false);
}

void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands,
                             bool HasMods) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (!HasMods && Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (HasMods && Op.isRegOrImmWithInputMods()) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // ToDo: fix default values for row_mask and bank_mask
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(TheAMDGPUTarget);
  RegisterMCAsmParser<AMDGPUAsmParser> B(TheGCNTarget);
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "AMDGPUGenAsmMatcher.inc"