blob: 453d0d91d3e35545c7e623d88331fce02a9f29d5 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000018#include "llvm/ADT/APInt.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000019#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000020#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000021#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000023#include "llvm/ADT/StringSwitch.h"
24#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000025#include "llvm/CodeGen/MachineValueType.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000026#include "llvm/MC/MCContext.h"
27#include "llvm/MC/MCExpr.h"
28#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000029#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000030#include "llvm/MC/MCInstrInfo.h"
31#include "llvm/MC/MCParser/MCAsmLexer.h"
32#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000033#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000034#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000035#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/MC/MCRegisterInfo.h"
37#include "llvm/MC/MCStreamer.h"
38#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000039#include "llvm/MC/MCSymbol.h"
40#include "llvm/Support/Casting.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000041#include "llvm/Support/Debug.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000042#include "llvm/Support/ELF.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000043#include "llvm/Support/ErrorHandling.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000044#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000045#include "llvm/Support/raw_ostream.h"
46#include "llvm/Support/SMLoc.h"
47#include "llvm/Support/TargetRegistry.h"
48#include <algorithm>
49#include <cassert>
50#include <cstdint>
51#include <cstring>
52#include <iterator>
53#include <map>
54#include <memory>
55#include <string>
56#include <vector>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000057
Tom Stellard45bb48e2015-06-13 03:28:10 +000058using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000059using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000060
61namespace {
62
Sam Kolton1eeb11b2016-09-09 14:44:04 +000063class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000064
/// Classification of a parsed register operand: vector GPR, scalar GPR,
/// trap-temporary register, or one of the named special registers
/// (vcc, exec, m0, ...). IS_UNKNOWN marks a failed classification.
enum RegisterKind {
  IS_UNKNOWN,
  IS_VGPR,
  IS_SGPR,
  IS_TTMP,
  IS_SPECIAL
};
66
Sam Kolton1eeb11b2016-09-09 14:44:04 +000067//===----------------------------------------------------------------------===//
68// Operand
69//===----------------------------------------------------------------------===//
70
Tom Stellard45bb48e2015-06-13 03:28:10 +000071class AMDGPUOperand : public MCParsedAsmOperand {
72 enum KindTy {
73 Token,
74 Immediate,
75 Register,
76 Expression
77 } Kind;
78
79 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000080 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000081
82public:
Sam Kolton1eeb11b2016-09-09 14:44:04 +000083 AMDGPUOperand(enum KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
84 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000085
Sam Kolton5f10a132016-05-06 11:31:17 +000086 typedef std::unique_ptr<AMDGPUOperand> Ptr;
87
Sam Kolton945231a2016-06-10 09:57:59 +000088 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000089 bool Abs = false;
90 bool Neg = false;
91 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000092
93 bool hasFPModifiers() const { return Abs || Neg; }
94 bool hasIntModifiers() const { return Sext; }
95 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
96
97 int64_t getFPModifiersOperand() const {
98 int64_t Operand = 0;
99 Operand |= Abs ? SISrcMods::ABS : 0;
100 Operand |= Neg ? SISrcMods::NEG : 0;
101 return Operand;
102 }
103
104 int64_t getIntModifiersOperand() const {
105 int64_t Operand = 0;
106 Operand |= Sext ? SISrcMods::SEXT : 0;
107 return Operand;
108 }
109
110 int64_t getModifiersOperand() const {
111 assert(!(hasFPModifiers() && hasIntModifiers())
112 && "fp and int modifiers should not be used simultaneously");
113 if (hasFPModifiers()) {
114 return getFPModifiersOperand();
115 } else if (hasIntModifiers()) {
116 return getIntModifiersOperand();
117 } else {
118 return 0;
119 }
120 }
121
122 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
123 };
124
Tom Stellard45bb48e2015-06-13 03:28:10 +0000125 enum ImmTy {
126 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000127 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000128 ImmTyOffen,
129 ImmTyIdxen,
130 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000131 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000132 ImmTyOffset0,
133 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000134 ImmTyGLC,
135 ImmTySLC,
136 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000137 ImmTyClampSI,
138 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000139 ImmTyDppCtrl,
140 ImmTyDppRowMask,
141 ImmTyDppBankMask,
142 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000143 ImmTySdwaDstSel,
144 ImmTySdwaSrc0Sel,
145 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000146 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000147 ImmTyDMask,
148 ImmTyUNorm,
149 ImmTyDA,
150 ImmTyR128,
151 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000152 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000153 ImmTyExpCompr,
154 ImmTyExpVM,
Artem Tamazovd6468662016-04-25 14:13:51 +0000155 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000156 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000157 ImmTySendMsg,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000158 };
159
160 struct TokOp {
161 const char *Data;
162 unsigned Length;
163 };
164
165 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000166 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000167 ImmTy Type;
168 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000169 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000170 };
171
172 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000173 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000174 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000175 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000176 };
177
178 union {
179 TokOp Tok;
180 ImmOp Imm;
181 RegOp Reg;
182 const MCExpr *Expr;
183 };
184
Tom Stellard45bb48e2015-06-13 03:28:10 +0000185 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000186 if (Kind == Token)
187 return true;
188
189 if (Kind != Expression || !Expr)
190 return false;
191
192 // When parsing operands, we can't always tell if something was meant to be
193 // a token, like 'gds', or an expression that references a global variable.
194 // In this case, we assume the string is an expression, and if we need to
195 // interpret is a token, then we treat the symbol name as the token.
196 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000197 }
198
199 bool isImm() const override {
200 return Kind == Immediate;
201 }
202
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000203 bool isInlinableImm(MVT type) const;
204 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000205
Tom Stellard45bb48e2015-06-13 03:28:10 +0000206 bool isRegKind() const {
207 return Kind == Register;
208 }
209
210 bool isReg() const override {
Sam Kolton945231a2016-06-10 09:57:59 +0000211 return isRegKind() && !Reg.Mods.hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000212 }
213
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000214 bool isRegOrImmWithInputMods(MVT type) const {
215 return isRegKind() || isInlinableImm(type);
216 }
217
218 bool isRegOrImmWithInt32InputMods() const {
219 return isRegOrImmWithInputMods(MVT::i32);
220 }
221
222 bool isRegOrImmWithInt64InputMods() const {
223 return isRegOrImmWithInputMods(MVT::i64);
224 }
225
226 bool isRegOrImmWithFP32InputMods() const {
227 return isRegOrImmWithInputMods(MVT::f32);
228 }
229
230 bool isRegOrImmWithFP64InputMods() const {
231 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000232 }
233
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000234 bool isVReg32OrOff() const {
235 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
236 }
237
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000238 bool isImmTy(ImmTy ImmT) const {
239 return isImm() && Imm.Type == ImmT;
240 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000241
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000242 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000243 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000244 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000245
Sam Kolton945231a2016-06-10 09:57:59 +0000246 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
247 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
248 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000249 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
250 bool isDA() const { return isImmTy(ImmTyDA); }
251 bool isR128() const { return isImmTy(ImmTyUNorm); }
252 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000253 bool isOff() const { return isImmTy(ImmTyOff); }
254 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000255 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
256 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000257 bool isOffen() const { return isImmTy(ImmTyOffen); }
258 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
259 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
260 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
261 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
262 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000263 bool isGDS() const { return isImmTy(ImmTyGDS); }
264 bool isGLC() const { return isImmTy(ImmTyGLC); }
265 bool isSLC() const { return isImmTy(ImmTySLC); }
266 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000267 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
268 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
269 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
270 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
271 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
272 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
273 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000274
Sam Kolton945231a2016-06-10 09:57:59 +0000275 bool isMod() const {
276 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000277 }
278
279 bool isRegOrImm() const {
280 return isReg() || isImm();
281 }
282
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000283 bool isRegClass(unsigned RCID) const;
284
285 bool isSCSrcB32() const {
286 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000287 }
288
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000289 bool isSCSrcB64() const {
290 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000291 }
292
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000293 bool isSCSrcF32() const {
294 return isRegClass(AMDGPU::SReg_32RegClassID) || isInlinableImm(MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000295 }
296
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000297 bool isSCSrcF64() const {
298 return isRegClass(AMDGPU::SReg_64RegClassID) || isInlinableImm(MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000299 }
300
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000301 bool isSSrcB32() const {
302 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
303 }
304
305 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000306 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
307 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000308 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000309 }
310
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000311 bool isSSrcF32() const {
312 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000313 }
314
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000315 bool isSSrcF64() const {
316 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000317 }
318
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000319 bool isVCSrcB32() const {
320 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000321 }
322
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000323 bool isVCSrcB64() const {
324 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::i64);
325 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000326
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000327 bool isVCSrcF32() const {
328 return isRegClass(AMDGPU::VS_32RegClassID) || isInlinableImm(MVT::f32);
329 }
330
331 bool isVCSrcF64() const {
332 return isRegClass(AMDGPU::VS_64RegClassID) || isInlinableImm(MVT::f64);
333 }
334
335 bool isVSrcB32() const {
336 return isVCSrcF32() || isLiteralImm(MVT::i32);
337 }
338
339 bool isVSrcB64() const {
340 return isVCSrcF64() || isLiteralImm(MVT::i64);
341 }
342
343 bool isVSrcF32() const {
344 return isVCSrcF32() || isLiteralImm(MVT::f32);
345 }
346
347 bool isVSrcF64() const {
348 return isVCSrcF64() || isLiteralImm(MVT::f64);
349 }
350
351 bool isKImmFP32() const {
352 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000353 }
354
355 bool isMem() const override {
356 return false;
357 }
358
359 bool isExpr() const {
360 return Kind == Expression;
361 }
362
363 bool isSoppBrTarget() const {
364 return isExpr() || isImm();
365 }
366
Sam Kolton945231a2016-06-10 09:57:59 +0000367 bool isSWaitCnt() const;
368 bool isHwreg() const;
369 bool isSendMsg() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000370 bool isSMRDOffset8() const;
371 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000372 bool isSMRDLiteralOffset() const;
373 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000374 bool isGPRIdxMode() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000375
Tom Stellard89049702016-06-15 02:54:14 +0000376 StringRef getExpressionAsToken() const {
377 assert(isExpr());
378 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
379 return S->getSymbol().getName();
380 }
381
Sam Kolton945231a2016-06-10 09:57:59 +0000382 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000383 assert(isToken());
384
385 if (Kind == Expression)
386 return getExpressionAsToken();
387
Sam Kolton945231a2016-06-10 09:57:59 +0000388 return StringRef(Tok.Data, Tok.Length);
389 }
390
391 int64_t getImm() const {
392 assert(isImm());
393 return Imm.Val;
394 }
395
396 enum ImmTy getImmTy() const {
397 assert(isImm());
398 return Imm.Type;
399 }
400
401 unsigned getReg() const override {
402 return Reg.RegNo;
403 }
404
Tom Stellard45bb48e2015-06-13 03:28:10 +0000405 SMLoc getStartLoc() const override {
406 return StartLoc;
407 }
408
Peter Collingbourne0da86302016-10-10 22:49:37 +0000409 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000410 return EndLoc;
411 }
412
Sam Kolton945231a2016-06-10 09:57:59 +0000413 Modifiers getModifiers() const {
414 assert(isRegKind() || isImmTy(ImmTyNone));
415 return isRegKind() ? Reg.Mods : Imm.Mods;
416 }
417
418 void setModifiers(Modifiers Mods) {
419 assert(isRegKind() || isImmTy(ImmTyNone));
420 if (isRegKind())
421 Reg.Mods = Mods;
422 else
423 Imm.Mods = Mods;
424 }
425
426 bool hasModifiers() const {
427 return getModifiers().hasModifiers();
428 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000429
Sam Kolton945231a2016-06-10 09:57:59 +0000430 bool hasFPModifiers() const {
431 return getModifiers().hasFPModifiers();
432 }
433
434 bool hasIntModifiers() const {
435 return getModifiers().hasIntModifiers();
436 }
437
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000438 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000439
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000440 void addLiteralImmOperand(MCInst &Inst, int64_t Val) const;
441
442 void addKImmFP32Operands(MCInst &Inst, unsigned N) const;
443
444 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000445
446 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
447 if (isRegKind())
448 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000449 else if (isExpr())
450 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000451 else
452 addImmOperands(Inst, N);
453 }
454
455 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
456 Modifiers Mods = getModifiers();
457 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
458 if (isRegKind()) {
459 addRegOperands(Inst, N);
460 } else {
461 addImmOperands(Inst, N, false);
462 }
463 }
464
465 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
466 assert(!hasIntModifiers());
467 addRegOrImmWithInputModsOperands(Inst, N);
468 }
469
470 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
471 assert(!hasFPModifiers());
472 addRegOrImmWithInputModsOperands(Inst, N);
473 }
474
475 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
476 if (isImm())
477 addImmOperands(Inst, N);
478 else {
479 assert(isExpr());
480 Inst.addOperand(MCOperand::createExpr(Expr));
481 }
482 }
483
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000484 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000485 switch (Type) {
486 case ImmTyNone: OS << "None"; break;
487 case ImmTyGDS: OS << "GDS"; break;
488 case ImmTyOffen: OS << "Offen"; break;
489 case ImmTyIdxen: OS << "Idxen"; break;
490 case ImmTyAddr64: OS << "Addr64"; break;
491 case ImmTyOffset: OS << "Offset"; break;
492 case ImmTyOffset0: OS << "Offset0"; break;
493 case ImmTyOffset1: OS << "Offset1"; break;
494 case ImmTyGLC: OS << "GLC"; break;
495 case ImmTySLC: OS << "SLC"; break;
496 case ImmTyTFE: OS << "TFE"; break;
497 case ImmTyClampSI: OS << "ClampSI"; break;
498 case ImmTyOModSI: OS << "OModSI"; break;
499 case ImmTyDppCtrl: OS << "DppCtrl"; break;
500 case ImmTyDppRowMask: OS << "DppRowMask"; break;
501 case ImmTyDppBankMask: OS << "DppBankMask"; break;
502 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000503 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
504 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
505 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000506 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
507 case ImmTyDMask: OS << "DMask"; break;
508 case ImmTyUNorm: OS << "UNorm"; break;
509 case ImmTyDA: OS << "DA"; break;
510 case ImmTyR128: OS << "R128"; break;
511 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000512 case ImmTyOff: OS << "Off"; break;
513 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000514 case ImmTyExpCompr: OS << "ExpCompr"; break;
515 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000516 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000517 case ImmTySendMsg: OS << "SendMsg"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000518 }
519 }
520
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000521 void print(raw_ostream &OS) const override {
522 switch (Kind) {
523 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000524 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000525 break;
526 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000527 OS << '<' << getImm();
528 if (getImmTy() != ImmTyNone) {
529 OS << " type: "; printImmTy(OS, getImmTy());
530 }
Sam Kolton945231a2016-06-10 09:57:59 +0000531 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000532 break;
533 case Token:
534 OS << '\'' << getToken() << '\'';
535 break;
536 case Expression:
537 OS << "<expr " << *Expr << '>';
538 break;
539 }
540 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000541
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000542 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
543 int64_t Val, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000544 enum ImmTy Type = ImmTyNone,
545 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000546 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000547 Op->Imm.Val = Val;
548 Op->Imm.IsFPImm = IsFPImm;
549 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000550 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000551 Op->StartLoc = Loc;
552 Op->EndLoc = Loc;
553 return Op;
554 }
555
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000556 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
557 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000558 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000559 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000560 Res->Tok.Data = Str.data();
561 Res->Tok.Length = Str.size();
562 Res->StartLoc = Loc;
563 Res->EndLoc = Loc;
564 return Res;
565 }
566
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000567 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
568 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000569 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000570 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000571 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000572 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000573 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000574 Op->Reg.IsForcedVOP3 = ForceVOP3;
575 Op->StartLoc = S;
576 Op->EndLoc = E;
577 return Op;
578 }
579
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000580 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
581 const class MCExpr *Expr, SMLoc S) {
582 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000583 Op->Expr = Expr;
584 Op->StartLoc = S;
585 Op->EndLoc = S;
586 return Op;
587 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000588};
589
Sam Kolton945231a2016-06-10 09:57:59 +0000590raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
591 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
592 return OS;
593}
594
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000595//===----------------------------------------------------------------------===//
596// AsmParser
597//===----------------------------------------------------------------------===//
598
Tom Stellard45bb48e2015-06-13 03:28:10 +0000599class AMDGPUAsmParser : public MCTargetAsmParser {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000600 const MCInstrInfo &MII;
601 MCAsmParser &Parser;
602
603 unsigned ForcedEncodingSize;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000604 bool ForcedDPP;
605 bool ForcedSDWA;
Matt Arsenault68802d32015-11-05 03:11:27 +0000606
Tom Stellard45bb48e2015-06-13 03:28:10 +0000607 /// @name Auto-generated Match Functions
608 /// {
609
610#define GET_ASSEMBLER_HEADER
611#include "AMDGPUGenAsmMatcher.inc"
612
613 /// }
614
private:
  // Directive handlers. Each is invoked from ParseDirective() and returns
  // true on error, following the MCAsmParserExtension convention.
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  // Returns true if RegNo exists on the current subtarget.
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  // Register-list parsing helpers used when a register operand spans
  // several consecutive registers (e.g. s[4:7]).
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth);
  // Shared MUBUF conversion used by the plain, atomic, and atomic-return forms.
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands, bool IsAtomic, bool IsAtomicReturn);
Tom Stellard347ac792015-06-26 21:15:07 +0000632
Tom Stellard45bb48e2015-06-13 03:28:10 +0000633public:
  // Target-specific match result codes, returned by
  // checkTargetMatchPredicate() in addition to the generic Match_* codes.
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };
637
  // Constructor: wires the parser into the MC machinery, picks a default
  // subtarget when none was specified, and predefines the ISA-version
  // assembler symbols.
  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser),
        ForcedEncodingSize(0),
        ForcedDPP(false),
        ForcedSDWA(false) {
    MCAsmParserExtension::Initialize(Parser);

    if (getSTI().getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    // Must run after the feature default above so the matcher sees the
    // final feature set.
    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(Isa.Stepping, Ctx));
    }
  }
669
  // Subtarget-generation predicates, delegating to AMDGPUBaseInfo.
  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  // True if the subtarget accepts 1/(2*pi) as an inline immediate.
  bool hasInv2PiInlineImm() const {
    return getSTI().getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  // SGPR102/SGPR103 exist only on pre-VI subtargets.
  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }
689
  // The target streamer is always an AMDGPUTargetStreamer here, so the
  // static_cast is safe.
  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }
704
  // "Forced" state is set from mnemonic suffixes (_e32/_e64/_dpp/_sdwa)
  // and steers instruction matching toward a specific encoding.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000713
  // Core MCTargetAsmParser overrides and operand-parsing entry points.
  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  // Strips encoding suffixes (_e32/_e64/_dpp/_sdwa) and records them as
  // forced-encoding state; returns the bare mnemonic.
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  // Generic "prefix:value" and named-bit operand parsers used by many
  // instruction formats.
  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);
  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);
  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                enum AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  OperandMatchResultTy parseImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);

  // Operand-list to MCInst converters for DS and EXP instructions.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands);
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  // s_waitcnt and s_sethreg operand parsing.
  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);
Sam Kolton11de3702016-05-24 12:38:33 +0000754
private:
  // Result of parsing one field of a sendmsg/hwreg construct: the numeric id
  // and whether it was written symbolically (by name) rather than as a number.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  // Parse the interiors of sendmsg(...) and hwreg(...); return true on error.
  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  // Diagnostics / parsing for export-target (exp) operands.
  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);
767
public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // MUBUF: thin wrappers selecting atomic / atomic-with-return lowering.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  // Default operands inserted when an optional field was not written.
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  // MIMG / SMRD / EXP default operands.
  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultExpTgt() const;
  AMDGPUOperand::Ptr defaultExpCompr() const;
  AMDGPUOperand::Ptr defaultExpVM() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  // VOP3 converters.
  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);

  // MIMG converters.
  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  // DPP control operand and converter, plus default mask operands.
  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  // SDWA select/unused operands and per-format converters.
  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000817};
818
// Table entry describing one optional instruction operand (e.g. "clamp",
// "omod") for the optional-operand parser.
struct OptionalOperand {
  const char *Name;                 // Assembly-text name of the operand.
  AMDGPUOperand::ImmTy Type;        // Immediate kind created when it is seen.
  bool IsBit;                       // Presumably: operand is a bare named bit
                                    // (parsed via parseNamedBit) — TODO confirm.
  bool (*ConvertResult)(int64_t&);  // Optional post-parse value conversion.
};
825
Eugene Zelenko2bc2f332016-12-09 22:06:55 +0000826} // end anonymous namespace
827
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000828// May be called with integer type with equivalent bitwidth.
829static const fltSemantics *getFltSemantics(MVT VT) {
830 switch (VT.getSizeInBits()) {
831 case 32:
832 return &APFloat::IEEEsingle;
833 case 64:
834 return &APFloat::IEEEdouble;
835 case 16:
836 return &APFloat::IEEEhalf;
837 default:
838 llvm_unreachable("unsupported fp type");
839 }
840}
841
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000842//===----------------------------------------------------------------------===//
843// Operand
844//===----------------------------------------------------------------------===//
845
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000846static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
847 bool Lost;
848
849 // Convert literal to single precision
850 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
851 APFloat::rmNearestTiesToEven,
852 &Lost);
853 // We allow precision lost but not overflow or underflow
854 if (Status != APFloat::opOK &&
855 Lost &&
856 ((Status & APFloat::opOverflow) != 0 ||
857 (Status & APFloat::opUnderflow) != 0)) {
858 return false;
859 }
860
861 return true;
862}
863
// Returns true if this immediate can be encoded as an inline constant for an
// operand of machine type \p type (no extra literal dword needed).
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      // Imm.Val holds the raw double bit pattern for FP tokens.
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // NOTE: canLosslesslyConvertToFPType converts FPLiteral IN PLACE to the
    // semantics of 'type'; the inlinability check below runs on the
    // converted bit pattern.
    APFloat FPLiteral(APFloat::IEEEdouble, APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    // Check if single precision literal is inlinable
    // NOTE(review): for a 16-bit 'type' the converted pattern is still
    // checked with isInlinableLiteral32 — confirm this is intended for f16.
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }


  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // Narrow the integer token to 32 bits before the inline-constant check.
  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
902
903bool AMDGPUOperand::isLiteralImm(MVT type) const {
904 // Check that this imediate can be added as literal
905 if (!isImmTy(ImmTyNone)) {
906 return false;
907 }
908
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000909 if (!Imm.IsFPImm) {
910 // We got int literal token.
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000911
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000912 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
913 // types.
914 return isUInt<32>(Imm.Val) || isInt<32>(Imm.Val);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000915 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +0000916
917 // We got fp literal token
918 if (type == MVT::f64) { // Expected 64-bit fp operand
919 // We would set low 64-bits of literal to zeroes but we accept this literals
920 return true;
921 }
922
923 if (type == MVT::i64) { // Expected 64-bit int operand
924 // We don't allow fp literals in 64-bit integer instructions. It is
925 // unclear how we should encode them.
926 return false;
927 }
928
929 APFloat FPLiteral(APFloat::IEEEdouble, APInt(64, Imm.Val));
930 return canLosslesslyConvertToFPType(FPLiteral, type);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000931}
932
933bool AMDGPUOperand::isRegClass(unsigned RCID) const {
934 return isReg() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
935}
936
937void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
938 int64_t Val = Imm.Val;
939 if (isImmTy(ImmTyNone) && ApplyModifiers && Imm.Mods.hasFPModifiers() && Imm.Mods.Neg) {
940 // Apply modifiers to immediate value. Only negate can get here
941 if (Imm.IsFPImm) {
942 APFloat F(BitsToDouble(Val));
943 F.changeSign();
944 Val = F.bitcastToAPInt().getZExtValue();
945 } else {
946 Val = -Val;
947 }
948 }
949
950 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()), Inst.getNumOperands())) {
951 addLiteralImmOperand(Inst, Val);
952 } else {
953 Inst.addOperand(MCOperand::createImm(Val));
954 }
955}
956
// Append \p Val to \p Inst for an SI source operand. Inline-constant values
// are emitted directly; other values are narrowed/converted into the 32-bit
// literal-constant slot.
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  APInt Literal(64, Val);
  auto OpSize = AMDGPU::getRegOperandSize(AsmParser->getMRI(), InstDesc, OpNum); // expected operand size

  if (Imm.IsFPImm) { // We got fp literal token
    if (OpSize == 8) { // Expected 64-bit operand
      // Check if literal is inlinable
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
      } else if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
           "Can't encode literal as exact 64-bit"
           " floating-point operand. Low 32-bits will be"
           " set to zero");
        }
        // Only the high 32 bits can be encoded; they become the literal.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
      } else {
        // We don't allow fp literals in 64-bit integer instructions. It is
        // unclear how we should encode them. This case should be checked earlier
        // in predicate methods (isLiteralImm())
        llvm_unreachable("fp literal in 64-bit integer instruction.");
      }
    } else { // Expected 32-bit operand
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble, Literal);
      // Convert literal to single precision
      FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()
      Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
    }
  } else { // We got int literal token
    if (OpSize == 8) { // Expected 64-bit operand
      auto LiteralVal = Literal.getZExtValue();
      if (AMDGPU::isInlinableLiteral64(LiteralVal,
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    } else { // Expected 32-bit operand
      auto LiteralVal = static_cast<int32_t>(Literal.getLoBits(32).getZExtValue());
      if (AMDGPU::isInlinableLiteral32(LiteralVal,
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(LiteralVal));
        return;
      }
    }
    // Not inlinable: emit the low 32 bits as a literal constant.
    Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
  }
}
1015
1016void AMDGPUOperand::addKImmFP32Operands(MCInst &Inst, unsigned N) const {
1017 APInt Literal(64, Imm.Val);
1018 if (Imm.IsFPImm) { // We got fp literal
1019 bool lost;
1020 APFloat FPLiteral(APFloat::IEEEdouble, Literal);
1021 FPLiteral.convert(APFloat::IEEEsingle, APFloat::rmNearestTiesToEven, &lost);
1022 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
1023 } else { // We got int literal token
1024 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(32).getZExtValue()));
1025 }
1026}
1027
1028void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1029 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1030}
1031
1032//===----------------------------------------------------------------------===//
1033// AsmParser
1034//===----------------------------------------------------------------------===//
1035
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001036static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1037 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001038 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001039 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001040 case 1: return AMDGPU::VGPR_32RegClassID;
1041 case 2: return AMDGPU::VReg_64RegClassID;
1042 case 3: return AMDGPU::VReg_96RegClassID;
1043 case 4: return AMDGPU::VReg_128RegClassID;
1044 case 8: return AMDGPU::VReg_256RegClassID;
1045 case 16: return AMDGPU::VReg_512RegClassID;
1046 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001047 } else if (Is == IS_TTMP) {
1048 switch (RegWidth) {
1049 default: return -1;
1050 case 1: return AMDGPU::TTMP_32RegClassID;
1051 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001052 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001053 }
1054 } else if (Is == IS_SGPR) {
1055 switch (RegWidth) {
1056 default: return -1;
1057 case 1: return AMDGPU::SGPR_32RegClassID;
1058 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001059 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001060 case 8: return AMDGPU::SReg_256RegClassID;
1061 case 16: return AMDGPU::SReg_512RegClassID;
1062 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001063 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001064 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001065}
1066
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001067static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001068 return StringSwitch<unsigned>(RegName)
1069 .Case("exec", AMDGPU::EXEC)
1070 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001071 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001072 .Case("m0", AMDGPU::M0)
1073 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001074 .Case("tba", AMDGPU::TBA)
1075 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001076 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1077 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001078 .Case("vcc_lo", AMDGPU::VCC_LO)
1079 .Case("vcc_hi", AMDGPU::VCC_HI)
1080 .Case("exec_lo", AMDGPU::EXEC_LO)
1081 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001082 .Case("tma_lo", AMDGPU::TMA_LO)
1083 .Case("tma_hi", AMDGPU::TMA_HI)
1084 .Case("tba_lo", AMDGPU::TBA_LO)
1085 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001086 .Default(0);
1087}
1088
1089bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001090 auto R = parseRegister();
1091 if (!R) return true;
1092 assert(R->isReg());
1093 RegNo = R->getReg();
1094 StartLoc = R->getStartLoc();
1095 EndLoc = R->getEndLoc();
1096 return false;
1097}
1098
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001099bool AMDGPUAsmParser::AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth, RegisterKind RegKind, unsigned Reg1, unsigned RegNum)
1100{
1101 switch (RegKind) {
1102 case IS_SPECIAL:
1103 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) { Reg = AMDGPU::EXEC; RegWidth = 2; return true; }
1104 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) { Reg = AMDGPU::FLAT_SCR; RegWidth = 2; return true; }
1105 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) { Reg = AMDGPU::VCC; RegWidth = 2; return true; }
1106 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) { Reg = AMDGPU::TBA; RegWidth = 2; return true; }
1107 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) { Reg = AMDGPU::TMA; RegWidth = 2; return true; }
1108 return false;
1109 case IS_VGPR:
1110 case IS_SGPR:
1111 case IS_TTMP:
1112 if (Reg1 != Reg + RegWidth) { return false; }
1113 RegWidth++;
1114 return true;
1115 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001116 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001117 }
1118}
1119
// Parse one register reference: a special register by name, a single
// register ("v7"), a range ("v[8:11]"), or a bracketed list of consecutive
// registers ("[s0,s1,s2,s3]", parsed recursively). On success fills in the
// kind, MC register number, index and width; returns false on any syntax or
// validity error (note: true = success here, unlike the MC-style hook above).
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg, unsigned& RegNum, unsigned& RegWidth)
{
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Classify by prefix: v..., s..., or ttmp...
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    // Each element must itself parse as a single (width-1) register of the
    // same kind; AddNextRegisterToList enforces consecutiveness.
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1)) {
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Translate (kind, index, width) into an actual MC register.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned; max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    // Convert the dword index into an index within the register class.
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Finally reject registers the current subtarget does not have.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1245
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001246std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001247 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001248 SMLoc StartLoc = Tok.getLoc();
1249 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001250 RegisterKind RegKind;
1251 unsigned Reg, RegNum, RegWidth;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001252
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001253 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth)) {
1254 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001255 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001256 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001257}
1258
Alex Bradbury58eba092016-11-01 16:32:05 +00001259OperandMatchResultTy
Sam Kolton1bdcef72016-05-23 09:59:02 +00001260AMDGPUAsmParser::parseImm(OperandVector &Operands) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001261 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001262 bool Minus = false;
1263 if (getLexer().getKind() == AsmToken::Minus) {
1264 Minus = true;
1265 Parser.Lex();
1266 }
1267
1268 SMLoc S = Parser.getTok().getLoc();
1269 switch(getLexer().getKind()) {
1270 case AsmToken::Integer: {
1271 int64_t IntVal;
1272 if (getParser().parseAbsoluteExpression(IntVal))
1273 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001274 if (Minus)
1275 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001276 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001277 return MatchOperand_Success;
1278 }
1279 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001280 int64_t IntVal;
1281 if (getParser().parseAbsoluteExpression(IntVal))
1282 return MatchOperand_ParseFail;
1283
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001284 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001285 if (Minus)
1286 F.changeSign();
1287 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001288 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001289 AMDGPUOperand::ImmTyNone, true));
1290 return MatchOperand_Success;
1291 }
1292 default:
1293 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1294 }
1295}
1296
Alex Bradbury58eba092016-11-01 16:32:05 +00001297OperandMatchResultTy
Sam Kolton1bdcef72016-05-23 09:59:02 +00001298AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands) {
1299 auto res = parseImm(Operands);
1300 if (res != MatchOperand_NoMatch) {
1301 return res;
1302 }
1303
1304 if (auto R = parseRegister()) {
1305 assert(R->isReg());
1306 R->Reg.IsForcedVOP3 = isForcedVOP3();
1307 Operands.push_back(std::move(R));
1308 return MatchOperand_Success;
1309 }
1310 return MatchOperand_ParseFail;
1311}
1312
// Parse a source operand that may carry FP input modifiers: an optional
// leading '-' (neg), and absolute value written either as "abs(...)" or
// "|...|". The modifier flags are attached to the parsed operand.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands) {
  // XXX: During parsing we can't determine if minus sign means
  // negate-modifier or negative immediate value.
  // By default we suppose it is modifier.
  bool Negate = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    Parser.Lex();
    Negate = true;
  }

  // "abs(" form of the absolute-value modifier.
  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // "|...|" form; mixing it with "abs(" is rejected.
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Negate) {
    Mods.Neg = true;
  }
  if (Abs) {
    // Require the closing '|'.
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    // Require the closing ')'.
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  // Attach the modifiers to the operand just parsed.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1376
// Parse a source operand that may carry the integer input modifier
// "sext(...)"; the flag is attached to the parsed operand.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands) {
  bool Sext = false;

  if (getLexer().getKind() == AsmToken::Identifier && Parser.getTok().getString() == "sext") {
    Parser.Lex();
    Sext = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after sext");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  auto Res = parseRegOrImm(Operands);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  AMDGPUOperand::Modifiers Mods;
  if (Sext) {
    // Require the closing ')'.
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Sext = true;
  }

  // Attach the modifier to the operand just parsed.
  if (Mods.hasIntModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }

  return MatchOperand_Success;
}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001413
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001414OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1415 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1416 if (Reg) {
1417 Operands.push_back(std::move(Reg));
1418 return MatchOperand_Success;
1419 }
1420
1421 const AsmToken &Tok = Parser.getTok();
1422 if (Tok.getString() == "off") {
1423 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1424 AMDGPUOperand::ImmTyOff, false));
1425 Parser.Lex();
1426 return MatchOperand_Success;
1427 }
1428
1429 return MatchOperand_NoMatch;
1430}
1431
// Final target-specific validation of a matched instruction. Rejects matches
// that contradict an encoding the user forced via a mnemonic suffix
// (_e32/_e64/_dpp/_sdwa), and enforces per-opcode operand restrictions.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {

  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // Reject the match if the user forced an encoding the matched opcode does
  // not have (e.g. "_e64" suffix but a non-VOP3 opcode was selected).
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Some opcodes prefer the 32-bit encoding unless _e64 was written
  // explicitly; signal that so the caller can diagnose it.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}
1460
// Match the parsed operand list against the instruction tables and emit the
// resulting MCInst, trying each relevant asm variant (default/VOP3/SDWA/DPP)
// in turn. On failure, reports the most specific diagnostic gathered across
// all attempted variants. Returns true on error (LLVM asm-parser convention).
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  // What asm variants we should check. A forced encoding (from a mnemonic
  // suffix) narrows the search to a single variant; otherwise try them all.
  std::vector<unsigned> MatchedVariants;
  if (getForcedEncodingSize() == 32) {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT};
  } else if (isForcedVOP3()) {
    MatchedVariants = {AMDGPUAsmVariants::VOP3};
  } else if (isForcedSDWA()) {
    MatchedVariants = {AMDGPUAsmVariants::SDWA};
  } else if (isForcedDPP()) {
    MatchedVariants = {AMDGPUAsmVariants::DPP};
  } else {
    MatchedVariants = {AMDGPUAsmVariants::DEFAULT,
                       AMDGPUAsmVariants::VOP3,
                       AMDGPUAsmVariants::SDWA,
                       AMDGPUAsmVariants::DPP};
  }

  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : MatchedVariants) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail && Result != Match_InvalidOperand
                                 && Result != Match_MissingFeature
                                 && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    // First successful variant wins; no need to try the rest.
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher told us
    // which one it was; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
1539
Tom Stellard347ac792015-06-26 21:15:07 +00001540bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
1541 uint32_t &Minor) {
1542 if (getLexer().isNot(AsmToken::Integer))
1543 return TokError("invalid major version");
1544
1545 Major = getLexer().getTok().getIntVal();
1546 Lex();
1547
1548 if (getLexer().isNot(AsmToken::Comma))
1549 return TokError("minor version number required, comma expected");
1550 Lex();
1551
1552 if (getLexer().isNot(AsmToken::Integer))
1553 return TokError("invalid minor version");
1554
1555 Minor = getLexer().getTok().getIntVal();
1556 Lex();
1557
1558 return false;
1559}
1560
1561bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
1562
1563 uint32_t Major;
1564 uint32_t Minor;
1565
1566 if (ParseDirectiveMajorMinor(Major, Minor))
1567 return true;
1568
1569 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
1570 return false;
1571}
1572
// Handle the .hsa_code_object_isa directive. With no arguments the ISA
// version of the targeted GPU is emitted; otherwise the expected form is
//   <major>, <minor>, <stepping>, "<vendor>", "<arch>"
// Returns true (with a token error) on malformed input.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaVersion Isa = AMDGPU::getIsaVersion(getSTI().getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(Isa.Major, Isa.Minor,
                                                      Isa.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  // Third integer: stepping.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return TokError("invalid stepping version");

  Stepping = getLexer().getTok().getIntVal();
  Lex();

  // Fourth argument: quoted vendor name.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  // Fifth argument: quoted architecture name.
  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
1627
Tom Stellardff7416b2015-06-26 21:58:31 +00001628bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
1629 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001630 SmallString<40> ErrStr;
1631 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00001632 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00001633 return TokError(Err.str());
1634 }
Tom Stellardff7416b2015-06-26 21:58:31 +00001635 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00001636 return false;
1637}
1638
// Handle the .amd_kernel_code_t directive: start from the subtarget's default
// kernel code header, apply each "field = value" line until
// .end_amd_kernel_code_t, then emit the completed header.
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getSTI().getFeatureBits());

  while (true) {
    // Lex EndOfStatement. This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    // Terminator of the block.
    if (ID == ".end_amd_kernel_code_t")
      break;

    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
1666
Tom Stellarde135ffd2015-09-25 21:41:28 +00001667bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
1668 getParser().getStreamer().SwitchSection(
1669 AMDGPU::getHSATextSection(getContext()));
1670 return false;
1671}
1672
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001673bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
1674 if (getLexer().isNot(AsmToken::Identifier))
1675 return TokError("expected symbol name");
1676
1677 StringRef KernelName = Parser.getTok().getString();
1678
1679 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
1680 ELF::STT_AMDGPU_HSA_KERNEL);
1681 Lex();
1682 return false;
1683}
1684
Tom Stellard00f2f912015-12-02 19:47:57 +00001685bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
1686 if (getLexer().isNot(AsmToken::Identifier))
1687 return TokError("expected symbol name");
1688
1689 StringRef GlobalName = Parser.getTok().getIdentifier();
1690
1691 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
1692 Lex();
1693 return false;
1694}
1695
1696bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
1697 if (getLexer().isNot(AsmToken::Identifier))
1698 return TokError("expected symbol name");
1699
1700 StringRef GlobalName = Parser.getTok().getIdentifier();
1701
1702 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
1703 Lex();
1704 return false;
1705}
1706
1707bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
1708 getParser().getStreamer().SwitchSection(
1709 AMDGPU::getHSADataGlobalAgentSection(getContext()));
1710 return false;
1711}
1712
1713bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
1714 getParser().getStreamer().SwitchSection(
1715 AMDGPU::getHSADataGlobalProgramSection(getContext()));
1716 return false;
1717}
1718
Tom Stellard9760f032015-12-03 03:34:32 +00001719bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
1720 getParser().getStreamer().SwitchSection(
1721 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
1722 return false;
1723}
1724
Tom Stellard45bb48e2015-06-13 03:28:10 +00001725bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00001726 StringRef IDVal = DirectiveID.getString();
1727
1728 if (IDVal == ".hsa_code_object_version")
1729 return ParseDirectiveHSACodeObjectVersion();
1730
1731 if (IDVal == ".hsa_code_object_isa")
1732 return ParseDirectiveHSACodeObjectISA();
1733
Tom Stellardff7416b2015-06-26 21:58:31 +00001734 if (IDVal == ".amd_kernel_code_t")
1735 return ParseDirectiveAMDKernelCodeT();
1736
Tom Stellardfcfaea42016-05-05 17:03:33 +00001737 if (IDVal == ".hsatext")
Tom Stellarde135ffd2015-09-25 21:41:28 +00001738 return ParseSectionDirectiveHSAText();
1739
Tom Stellard1e1b05d2015-11-06 11:45:14 +00001740 if (IDVal == ".amdgpu_hsa_kernel")
1741 return ParseDirectiveAMDGPUHsaKernel();
1742
Tom Stellard00f2f912015-12-02 19:47:57 +00001743 if (IDVal == ".amdgpu_hsa_module_global")
1744 return ParseDirectiveAMDGPUHsaModuleGlobal();
1745
1746 if (IDVal == ".amdgpu_hsa_program_global")
1747 return ParseDirectiveAMDGPUHsaProgramGlobal();
1748
1749 if (IDVal == ".hsadata_global_agent")
1750 return ParseSectionDirectiveHSADataGlobalAgent();
1751
1752 if (IDVal == ".hsadata_global_program")
1753 return ParseSectionDirectiveHSADataGlobalProgram();
1754
Tom Stellard9760f032015-12-03 03:34:32 +00001755 if (IDVal == ".hsarodata_readonly_agent")
1756 return ParseSectionDirectiveHSARodataReadonlyAgent();
1757
Tom Stellard45bb48e2015-06-13 03:28:10 +00001758 return true;
1759}
1760
Matt Arsenault68802d32015-11-05 03:11:27 +00001761bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
1762 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00001763 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00001764 return true;
1765
Matt Arsenault3b159672015-12-01 20:31:08 +00001766 if (isSI()) {
1767 // No flat_scr
1768 switch (RegNo) {
1769 case AMDGPU::FLAT_SCR:
1770 case AMDGPU::FLAT_SCR_LO:
1771 case AMDGPU::FLAT_SCR_HI:
1772 return false;
1773 default:
1774 return true;
1775 }
1776 }
1777
Matt Arsenault68802d32015-11-05 03:11:27 +00001778 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
1779 // SI/CI have.
1780 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
1781 R.isValid(); ++R) {
1782 if (*R == RegNo)
1783 return false;
1784 }
1785
1786 return true;
1787}
1788
// Parse one instruction operand: first the per-operand custom parsers, then
// the generic register/immediate parser, and finally an expression/token
// fallback for bare identifiers.
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {

  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list. This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'. In order to do this, we parse
    // all tokens as expressions and then treat the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: fall back to a plain token operand.
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
1830
Sam Kolton05ef1c92016-06-03 10:27:37 +00001831StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
1832 // Clear any forced encodings from the previous instruction.
1833 setForcedEncodingSize(0);
1834 setForcedDPP(false);
1835 setForcedSDWA(false);
1836
1837 if (Name.endswith("_e64")) {
1838 setForcedEncodingSize(64);
1839 return Name.substr(0, Name.size() - 4);
1840 } else if (Name.endswith("_e32")) {
1841 setForcedEncodingSize(32);
1842 return Name.substr(0, Name.size() - 4);
1843 } else if (Name.endswith("_dpp")) {
1844 setForcedDPP(true);
1845 return Name.substr(0, Name.size() - 4);
1846 } else if (Name.endswith("_sdwa")) {
1847 setForcedSDWA(true);
1848 return Name.substr(0, Name.size() - 5);
1849 }
1850 return Name;
1851}
1852
// Parse one full instruction: the mnemonic (with any encoding suffix), then
// operands until end-of-statement. On any operand error the rest of the
// statement is consumed so the parser can resynchronize.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      // Skip to end of statement to resynchronize.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      // Skip to end of statement to resynchronize.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
1886
1887//===----------------------------------------------------------------------===//
1888// Utility functions
1889//===----------------------------------------------------------------------===//
1890
Alex Bradbury58eba092016-11-01 16:32:05 +00001891OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00001892AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001893 switch(getLexer().getKind()) {
1894 default: return MatchOperand_NoMatch;
1895 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001896 StringRef Name = Parser.getTok().getString();
1897 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001898 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001899 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001900
1901 Parser.Lex();
1902 if (getLexer().isNot(AsmToken::Colon))
1903 return MatchOperand_ParseFail;
1904
1905 Parser.Lex();
1906 if (getLexer().isNot(AsmToken::Integer))
1907 return MatchOperand_ParseFail;
1908
1909 if (getParser().parseAbsoluteExpression(Int))
1910 return MatchOperand_ParseFail;
1911 break;
1912 }
1913 }
1914 return MatchOperand_Success;
1915}
1916
Alex Bradbury58eba092016-11-01 16:32:05 +00001917OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00001918AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001919 enum AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001920 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001921 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001922 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001923
Alex Bradbury58eba092016-11-01 16:32:05 +00001924 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001925 if (Res != MatchOperand_Success)
1926 return Res;
1927
Nikolay Haustov4f672a32016-04-29 09:02:30 +00001928 if (ConvertResult && !ConvertResult(Value)) {
1929 return MatchOperand_ParseFail;
1930 }
1931
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001932 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00001933 return MatchOperand_Success;
1934}
1935
// Parse an optional named-bit flag operand: the token "<Name>" sets the bit
// to 1, "no<Name>" sets it to 0, and end-of-statement yields the default 0.
// The bit is appended to Operands as an immediate of type ImmTy.
OperandMatchResultTy
AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
                               enum AMDGPUOperand::ImmTy ImmTy) {
  int64_t Bit = 0;
  SMLoc S = Parser.getTok().getLoc();

  // We are at the end of the statement, and this is a default argument, so
  // use a default value.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    switch(getLexer().getKind()) {
    case AsmToken::Identifier: {
      StringRef Tok = Parser.getTok().getString();
      if (Tok == Name) {
        Bit = 1;
        Parser.Lex();
      } else if (Tok.startswith("no") && Tok.endswith(Name)) {
        // "no<Name>" explicitly clears the bit.
        Bit = 0;
        Parser.Lex();
      } else {
        return MatchOperand_NoMatch;
      }
      break;
    }
    default:
      return MatchOperand_NoMatch;
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
  return MatchOperand_Success;
}
1967
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001968typedef std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;
1969
Sam Koltona74cd522016-03-18 15:35:51 +00001970void addOptionalImmOperand(MCInst& Inst, const OperandVector& Operands,
1971 OptionalImmIndexMap& OptionalIdx,
Sam Koltondfa29f72016-03-09 12:29:31 +00001972 enum AMDGPUOperand::ImmTy ImmT, int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001973 auto i = OptionalIdx.find(ImmT);
1974 if (i != OptionalIdx.end()) {
1975 unsigned Idx = i->second;
1976 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
1977 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00001978 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00001979 }
1980}
1981
Alex Bradbury58eba092016-11-01 16:32:05 +00001982OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00001983AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00001984 if (getLexer().isNot(AsmToken::Identifier)) {
1985 return MatchOperand_NoMatch;
1986 }
1987 StringRef Tok = Parser.getTok().getString();
1988 if (Tok != Prefix) {
1989 return MatchOperand_NoMatch;
1990 }
1991
1992 Parser.Lex();
1993 if (getLexer().isNot(AsmToken::Colon)) {
1994 return MatchOperand_ParseFail;
1995 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00001996
Sam Kolton3025e7f2016-04-26 13:33:56 +00001997 Parser.Lex();
1998 if (getLexer().isNot(AsmToken::Identifier)) {
1999 return MatchOperand_ParseFail;
2000 }
2001
2002 Value = Parser.getTok().getString();
2003 return MatchOperand_Success;
2004}
2005
Tom Stellard45bb48e2015-06-13 03:28:10 +00002006//===----------------------------------------------------------------------===//
2007// ds
2008//===----------------------------------------------------------------------===//
2009
// Convert parsed operands into a DS instruction that takes the split
// offset0/offset1 pair: registers are appended in order, the optional
// offset0/offset1/gds immediates are filled in (defaulting to 0 when
// unwritten), and the implicit m0 use is appended last.
void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
                                    const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);

  Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
}
2033
2034void AMDGPUAsmParser::cvtDS(MCInst &Inst, const OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002035 std::map<enum AMDGPUOperand::ImmTy, unsigned> OptionalIdx;
2036 bool GDSOnly = false;
2037
2038 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2039 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2040
2041 // Add the register arguments
2042 if (Op.isReg()) {
2043 Op.addRegOperands(Inst, 1);
2044 continue;
2045 }
2046
2047 if (Op.isToken() && Op.getToken() == "gds") {
2048 GDSOnly = true;
2049 continue;
2050 }
2051
2052 // Handle optional arguments
2053 OptionalIdx[Op.getImmTy()] = i;
2054 }
2055
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002056 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
2057 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002058
2059 if (!GDSOnly) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002060 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002061 }
2062 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2063}
2064
// Convert parsed operands into an EXP instruction. Builds the "en" channel
// mask from which of the four sources were real registers versus "off"
// placeholders, passes the target immediate through, drops the "done" token
// (it is encoded in the opcode), and fills the optional vm/compr immediates.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned EnMask = 0;  // one bit per enabled source channel
  int SrcIdx = 0;       // index of the source slot being filled

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      EnMask |= (1 << SrcIdx);
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    // "off" disables this channel: emit a null register, leave the bit clear.
    if (Op.isOff()) {
      ++SrcIdx;
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      continue;
    }

    // The export target immediate passes straight through.
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // "done" is not a separate MCInst operand; skip the token.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002105
2106//===----------------------------------------------------------------------===//
2107// s_waitcnt
2108//===----------------------------------------------------------------------===//
2109
// Parse one "<name>(<count>)" component of an s_waitcnt operand, where name
// is vmcnt, expcnt or lgkmcnt, and fold the count into the combined waitcnt
// encoding in IntVal. A trailing '&' or ',' separator is consumed. Returns
// true on any parse error or unknown counter name.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Components may be joined by '&' or ','; consume the separator if present.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  // Field layout depends on the ISA version, so encode via the helpers.
  IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
  if (CntName == "vmcnt")
    IntVal = encodeVmcnt(IV, IntVal, CntVal);
  else if (CntName == "expcnt")
    IntVal = encodeExpcnt(IV, IntVal, CntVal);
  else if (CntName == "lgkmcnt")
    IntVal = encodeLgkmcnt(IV, IntVal, CntVal);
  else
    return true;

  return false;
}
2144
// Parse the operand of s_waitcnt: either a raw integer encoding, or one or
// more named components like "vmcnt(0) lgkmcnt(1)". Unnamed counters keep
// their all-ones "don't wait" default from getWaitcntBitMask.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  IsaVersion IV = getIsaVersion(getSTI().getFeatureBits());
  int64_t Waitcnt = getWaitcntBitMask(IV);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // Named form: fold each "<name>(<count>)" into the encoding until the
    // end of the statement.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
2169
// Parse the "hwreg(<id> [, <offset>, <width>])" construct used by
// s_getreg/s_setreg operands. The id may be a symbolic register name or an
// integer; offset and width are optional but must appear together. Returns
// true on any syntax error (range checking is left to the caller).
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name: look it up in the IdSymbolic table. An unknown
    // name leaves Id at ID_UNKNOWN_ for the caller to diagnose.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form "hwreg(<id>)": offset/width keep their caller defaults.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2230
/// Parse the 16-bit immediate operand of s_getreg/s_setreg.
///
/// Accepts either a raw integer, or the symbolic form
///   hwreg(<name|id>[, <offset>, <width>])
/// which parseHwregConstruct() decomposes and which is packed here as
///   id | (offset << OFFSET_SHIFT_) | ((width-1) << WIDTH_M1_SHIFT_).
///
/// Out-of-range fields are diagnosed via Error() but an immediate operand is
/// created anyway and MatchOperand_Success is returned, so that one bad field
/// does not trigger a cascade of follow-on diagnostics.
OperandMatchResultTy
AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
    default: return MatchOperand_NoMatch;
    case AsmToken::Integer:
      // The operand can be an integer value.
      if (getParser().parseAbsoluteExpression(Imm16Val))
        return MatchOperand_NoMatch;
      if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
        Error(S, "invalid immediate: only 16-bit values are legal");
        // Do not return error code, but create an imm operand anyway and proceed
        // to the next operand, if any. That avoids unneccessary error messages.
      }
      break;

    case AsmToken::Identifier: {
        // Symbolic hwreg(...) form; defaults cover the optional offset/width.
        OperandInfoTy HwReg(ID_UNKNOWN_);
        int64_t Offset = OFFSET_DEFAULT_;
        int64_t Width = WIDTH_M1_DEFAULT_ + 1;
        if (parseHwregConstruct(HwReg, Offset, Width))
          return MatchOperand_ParseFail;
        if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
          // Distinguish "bad name" from "bad numeric id" in the diagnostic.
          if (HwReg.IsSymbolic)
            Error(S, "invalid symbolic name of hardware register");
          else
            Error(S, "invalid code of hardware register: only 6-bit values are legal");
        }
        if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
          Error(S, "invalid bit offset: only 5-bit values are legal");
        if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
          Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
        // Width is encoded biased by one (field stores width-1).
        Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
      }
      break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
2274
// Any immediate is acceptable as an s_waitcnt operand.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}
2278
// True only for immediates produced by parseHwreg() (ImmTyHwreg).
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
2282
Artem Tamazov6edc1352016-05-26 17:00:33 +00002283bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00002284 using namespace llvm::AMDGPU::SendMsg;
2285
2286 if (Parser.getTok().getString() != "sendmsg")
2287 return true;
2288 Parser.Lex();
2289
2290 if (getLexer().isNot(AsmToken::LParen))
2291 return true;
2292 Parser.Lex();
2293
2294 if (getLexer().is(AsmToken::Identifier)) {
2295 Msg.IsSymbolic = true;
2296 Msg.Id = ID_UNKNOWN_;
2297 const std::string tok = Parser.getTok().getString();
2298 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
2299 switch(i) {
2300 default: continue; // Omit gaps.
2301 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
2302 }
2303 if (tok == IdSymbolic[i]) {
2304 Msg.Id = i;
2305 break;
2306 }
2307 }
2308 Parser.Lex();
2309 } else {
2310 Msg.IsSymbolic = false;
2311 if (getLexer().isNot(AsmToken::Integer))
2312 return true;
2313 if (getParser().parseAbsoluteExpression(Msg.Id))
2314 return true;
2315 if (getLexer().is(AsmToken::Integer))
2316 if (getParser().parseAbsoluteExpression(Msg.Id))
2317 Msg.Id = ID_UNKNOWN_;
2318 }
2319 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
2320 return false;
2321
2322 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
2323 if (getLexer().isNot(AsmToken::RParen))
2324 return true;
2325 Parser.Lex();
2326 return false;
2327 }
2328
2329 if (getLexer().isNot(AsmToken::Comma))
2330 return true;
2331 Parser.Lex();
2332
2333 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
2334 Operation.Id = ID_UNKNOWN_;
2335 if (getLexer().is(AsmToken::Identifier)) {
2336 Operation.IsSymbolic = true;
2337 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
2338 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
2339 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00002340 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00002341 for (int i = F; i < L; ++i) {
2342 if (Tok == S[i]) {
2343 Operation.Id = i;
2344 break;
2345 }
2346 }
2347 Parser.Lex();
2348 } else {
2349 Operation.IsSymbolic = false;
2350 if (getLexer().isNot(AsmToken::Integer))
2351 return true;
2352 if (getParser().parseAbsoluteExpression(Operation.Id))
2353 return true;
2354 }
2355
2356 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
2357 // Stream id is optional.
2358 if (getLexer().is(AsmToken::RParen)) {
2359 Parser.Lex();
2360 return false;
2361 }
2362
2363 if (getLexer().isNot(AsmToken::Comma))
2364 return true;
2365 Parser.Lex();
2366
2367 if (getLexer().isNot(AsmToken::Integer))
2368 return true;
2369 if (getParser().parseAbsoluteExpression(StreamId))
2370 return true;
2371 }
2372
2373 if (getLexer().isNot(AsmToken::RParen))
2374 return true;
2375 Parser.Lex();
2376 return false;
2377}
2378
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002379void AMDGPUAsmParser::errorExpTgt() {
2380 Error(Parser.getTok().getLoc(), "invalid exp target");
2381}
2382
/// Translate an exp target name into its hardware encoding:
///   mrt0..mrt7       -> 0..7
///   mrtz             -> 8
///   null             -> 9
///   pos0..pos3       -> 12..15
///   param0..param31  -> 32..63
///   invalid_target_N -> N (always diagnosed)
/// Out-of-range values are diagnosed via errorExpTgt() but still return
/// MatchOperand_Success, so the caller can keep parsing.
OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 3)
      errorExpTgt();

    // pos0 starts at encoding 12.
    Val += 12;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    // param0 starts at encoding 32.
    Val += 32;
    return MatchOperand_Success;
  }

  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}
2441
2442OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
2443 uint8_t Val;
2444 StringRef Str = Parser.getTok().getString();
2445
2446 auto Res = parseExpTgtImpl(Str, Val);
2447 if (Res != MatchOperand_Success)
2448 return Res;
2449
2450 SMLoc S = Parser.getTok().getLoc();
2451 Parser.Lex();
2452
2453 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
2454 AMDGPUOperand::ImmTyExpTgt));
2455 return MatchOperand_Success;
2456}
2457
/// Parse the s_sendmsg operand: either a raw 16-bit integer or the symbolic
/// "sendmsg(MSG[, OP[, STREAM_ID]])" construct. The fields are validated and
/// packed into Imm16Val using the SendMsg shift constants. Validation errors
/// are reported but an immediate operand is still created, to avoid cascading
/// diagnostics; the do/while(false) below exists only so `break` can abort
/// the encoding after the first error.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        // NOP is only meaningful for GS_DONE.
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
2536
// True only for immediates produced by parseSendMsgOp() (ImmTySendMsg).
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
2540
Tom Stellard45bb48e2015-06-13 03:28:10 +00002541//===----------------------------------------------------------------------===//
2542// sopp branch targets
2543//===----------------------------------------------------------------------===//
2544
/// Parse a SOPP branch target: either an absolute integer expression (used
/// directly as an immediate) or an identifier, which becomes a symbol
/// reference expression resolved later at emission time.
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    // Create (or reuse) a symbol for the label and wrap it in an MCExpr.
    Operands.push_back(AMDGPUOperand::CreateExpr(this,
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
                                Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}
2567
2568//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002569// mubuf
2570//===----------------------------------------------------------------------===//
2571
Sam Kolton5f10a132016-05-06 11:31:17 +00002572AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002573 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00002574}
2575
2576AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002577 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
Sam Kolton5f10a132016-05-06 11:31:17 +00002578}
2579
2580AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002581 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
Sam Kolton5f10a132016-05-06 11:31:17 +00002582}
2583
/// Shared conversion for MUBUF instructions. Emits registers and the
/// immediate soffset in parse order, collects optional modifiers by type,
/// duplicates $vdata_in as $vdata for atomic-with-return forms, then appends
/// offset/glc/slc/tfe in encoding order (glc is hard-coded in atomic
/// mnemonics, so it is skipped for them).
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  // IsAtomicReturn implies IsAtomic.
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
2629
2630//===----------------------------------------------------------------------===//
2631// mimg
2632//===----------------------------------------------------------------------===//
2633
/// Convert parsed MIMG operands into an MCInst: defs first, then registers
/// and plain immediates in parse order, then the optional modifiers appended
/// in the fixed order required by the encoding
/// (dmask/unorm/glc/da/r128/tfe/lwe/slc).
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Record modifier positions; emitted below in encoding order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2666
/// Convert parsed MIMG atomic operands into an MCInst. Same layout as
/// cvtMIMG(), except the destination register is emitted a second time as
/// the source (atomics read and write the same vdata register).
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      // Record modifier positions; emitted below in encoding order.
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
2702
Sam Kolton5f10a132016-05-06 11:31:17 +00002703AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002704 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
Sam Kolton5f10a132016-05-06 11:31:17 +00002705}
2706
2707AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002708 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
Sam Kolton5f10a132016-05-06 11:31:17 +00002709}
2710
2711AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002712 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
Sam Kolton5f10a132016-05-06 11:31:17 +00002713}
2714
2715AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002716 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
Sam Kolton5f10a132016-05-06 11:31:17 +00002717}
2718
2719AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002720 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
Sam Kolton5f10a132016-05-06 11:31:17 +00002721}
2722
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002723AMDGPUOperand::Ptr AMDGPUAsmParser::defaultExpTgt() const {
2724 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyExpTgt);
2725}
2726
Matt Arsenault8a63cb92016-12-05 20:31:49 +00002727AMDGPUOperand::Ptr AMDGPUAsmParser::defaultExpCompr() const {
2728 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyExpCompr);
2729}
2730
2731AMDGPUOperand::Ptr AMDGPUAsmParser::defaultExpVM() const {
2732 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyExpVM);
2733}
2734
Tom Stellard45bb48e2015-06-13 03:28:10 +00002735//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00002736// smrd
2737//===----------------------------------------------------------------------===//
2738
Artem Tamazov54bfd542016-10-31 16:07:39 +00002739bool AMDGPUOperand::isSMRDOffset8() const {
Tom Stellard217361c2015-08-06 19:28:38 +00002740 return isImm() && isUInt<8>(getImm());
2741}
2742
Artem Tamazov54bfd542016-10-31 16:07:39 +00002743bool AMDGPUOperand::isSMRDOffset20() const {
2744 return isImm() && isUInt<20>(getImm());
2745}
2746
Tom Stellard217361c2015-08-06 19:28:38 +00002747bool AMDGPUOperand::isSMRDLiteralOffset() const {
2748 // 32-bit literals are only supported on CI and we only want to use them
2749 // when the offset is > 8-bits.
2750 return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
2751}
2752
Artem Tamazov54bfd542016-10-31 16:07:39 +00002753AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
2754 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
2755}
2756
2757AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002758 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00002759}
2760
2761AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002762 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
Sam Kolton5f10a132016-05-06 11:31:17 +00002763}
2764
Tom Stellard217361c2015-08-06 19:28:38 +00002765//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002766// vop3
2767//===----------------------------------------------------------------------===//
2768
// Re-encode an omod "mul:N" value: only 1, 2 and 4 are legal, and the
// encoded field is N >> 1 (1 -> 0, 2 -> 1, 4 -> 2). Returns false for any
// other multiplier, leaving Mul untouched.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1;
    return true;
  default:
    return false;
  }
}
2776
// Re-encode an omod "div:N" value: div:1 encodes as 0, div:2 as 3; any
// other divisor is rejected and Div is left untouched.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
2790
// Re-encode a DPP bound_ctrl value: the source forms 0 and -1 map to the
// encoded fields 1 and 0 respectively; everything else is rejected with
// BoundCtrl left untouched.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
2804
// Note: the order in this table matches the order of operands in AsmString.
// Each entry is {Name, Type, IsBit, ConvertResult}:
//   Name          - spelling of the operand in assembly text,
//   Type          - the AMDGPUOperand immediate type it produces,
//   IsBit         - true for bare flag operands (e.g. "glc"); false for
//                   "name:value" operands (e.g. "offset:16"),
//   ConvertResult - optional callback that re-encodes the parsed value.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA,    true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128,  true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE,   true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00002833
/// Try each optional operand from AMDGPUOptionalOperandTable in table order,
/// dispatching to the parser appropriate for its kind (named bit, omod,
/// SDWA sel/unused, or generic "name:value"). The first result other than
/// MatchOperand_NoMatch — success or failure — is returned.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else {
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
2857
Matt Arsenault12c53892016-11-15 19:58:54 +00002858OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002859 StringRef Name = Parser.getTok().getString();
2860 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00002861 return parseIntWithPrefix("mul", Operands,
2862 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002863 }
Matt Arsenault12c53892016-11-15 19:58:54 +00002864
2865 if (Name == "div") {
2866 return parseIntWithPrefix("div", Operands,
2867 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
2868 }
2869
2870 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002871}
2872
Tom Stellarda90b9522016-02-11 03:28:15 +00002873void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
2874 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00002875 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00002876 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002877 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
2878 }
2879 for (unsigned E = Operands.size(); I != E; ++I)
2880 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
2881}
2882
2883void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002884 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2885 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00002886 cvtVOP3(Inst, Operands);
2887 } else {
2888 cvtId(Inst, Operands);
2889 }
2890}
2891
Sam Koltona3ec5c12016-10-07 14:46:06 +00002892static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
2893 // 1. This operand is input modifiers
2894 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
2895 // 2. This is not last operand
2896 && Desc.NumOperands > (OpNum + 1)
2897 // 3. Next operand is register class
2898 && Desc.OpInfo[OpNum + 1].RegClass != -1
2899 // 4. Next register is not tied to any other operand
2900 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
2901}
2902
/// Convert parsed operands to a VOP3 MCInst. Defs are emitted first; sources
/// that take input modifiers are emitted as (modifiers, register) pairs;
/// remaining immediates are collected and appended as clamp/omod defaults.
/// v_mac_f16/f32 additionally require a zero src2_modifiers operand and a
/// copy of dst as the tied src2, which the parser never sees in the text.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  unsigned I = 1; // Operands[0] is the mnemonic token.
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Emits both the modifiers immediate and the modified register.
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    // Position the iterator at the src2_modifiers slot of the matching
    // pseudo-instruction's operand layout.
    std::advance(
      it,
      AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
                                     AMDGPU::V_MAC_F16_e64 :
                                     AMDGPU::V_MAC_F32_e64,
                                 AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
2944
Sam Koltondfa29f72016-03-09 12:29:31 +00002945//===----------------------------------------------------------------------===//
2946// dpp
2947//===----------------------------------------------------------------------===//
2948
2949bool AMDGPUOperand::isDPPCtrl() const {
2950 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
2951 if (result) {
2952 int64_t Imm = getImm();
2953 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
2954 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
2955 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
2956 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
2957 (Imm == 0x130) ||
2958 (Imm == 0x134) ||
2959 (Imm == 0x138) ||
2960 (Imm == 0x13c) ||
2961 (Imm == 0x140) ||
2962 (Imm == 0x141) ||
2963 (Imm == 0x142) ||
2964 (Imm == 0x143);
2965 }
2966 return false;
2967}
2968
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00002969bool AMDGPUOperand::isGPRIdxMode() const {
2970 return isImm() && isUInt<4>(getImm());
2971}
2972
Alex Bradbury58eba092016-11-01 16:32:05 +00002973OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002974AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00002975 SMLoc S = Parser.getTok().getLoc();
2976 StringRef Prefix;
2977 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00002978
Sam Koltona74cd522016-03-18 15:35:51 +00002979 if (getLexer().getKind() == AsmToken::Identifier) {
2980 Prefix = Parser.getTok().getString();
2981 } else {
2982 return MatchOperand_NoMatch;
2983 }
2984
2985 if (Prefix == "row_mirror") {
2986 Int = 0x140;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00002987 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00002988 } else if (Prefix == "row_half_mirror") {
2989 Int = 0x141;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00002990 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00002991 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00002992 // Check to prevent parseDPPCtrlOps from eating invalid tokens
2993 if (Prefix != "quad_perm"
2994 && Prefix != "row_shl"
2995 && Prefix != "row_shr"
2996 && Prefix != "row_ror"
2997 && Prefix != "wave_shl"
2998 && Prefix != "wave_rol"
2999 && Prefix != "wave_shr"
3000 && Prefix != "wave_ror"
3001 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00003002 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00003003 }
3004
Sam Koltona74cd522016-03-18 15:35:51 +00003005 Parser.Lex();
3006 if (getLexer().isNot(AsmToken::Colon))
3007 return MatchOperand_ParseFail;
3008
3009 if (Prefix == "quad_perm") {
3010 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00003011 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00003012 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00003013 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003014 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00003015
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003016 if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
Sam Koltondfa29f72016-03-09 12:29:31 +00003017 return MatchOperand_ParseFail;
3018
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003019 for (int i = 0; i < 3; ++i) {
3020 if (getLexer().isNot(AsmToken::Comma))
3021 return MatchOperand_ParseFail;
3022 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00003023
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003024 int64_t Temp;
3025 if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
3026 return MatchOperand_ParseFail;
3027 const int shift = i*2 + 2;
3028 Int += (Temp << shift);
3029 }
Sam Koltona74cd522016-03-18 15:35:51 +00003030
Sam Koltona74cd522016-03-18 15:35:51 +00003031 if (getLexer().isNot(AsmToken::RBrac))
3032 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003033 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00003034
3035 } else {
3036 // sel:%d
3037 Parser.Lex();
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003038 if (getParser().parseAbsoluteExpression(Int))
Sam Koltona74cd522016-03-18 15:35:51 +00003039 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00003040
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003041 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
Sam Koltona74cd522016-03-18 15:35:51 +00003042 Int |= 0x100;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003043 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
Sam Koltona74cd522016-03-18 15:35:51 +00003044 Int |= 0x110;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003045 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
Sam Koltona74cd522016-03-18 15:35:51 +00003046 Int |= 0x120;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003047 } else if (Prefix == "wave_shl" && 1 == Int) {
Sam Koltona74cd522016-03-18 15:35:51 +00003048 Int = 0x130;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003049 } else if (Prefix == "wave_rol" && 1 == Int) {
Sam Koltona74cd522016-03-18 15:35:51 +00003050 Int = 0x134;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003051 } else if (Prefix == "wave_shr" && 1 == Int) {
Sam Koltona74cd522016-03-18 15:35:51 +00003052 Int = 0x138;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00003053 } else if (Prefix == "wave_ror" && 1 == Int) {
Sam Koltona74cd522016-03-18 15:35:51 +00003054 Int = 0x13C;
3055 } else if (Prefix == "row_bcast") {
3056 if (Int == 15) {
3057 Int = 0x142;
3058 } else if (Int == 31) {
3059 Int = 0x143;
Sam Kolton7a2a3232016-07-14 14:50:35 +00003060 } else {
3061 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00003062 }
3063 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00003064 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00003065 }
Sam Koltondfa29f72016-03-09 12:29:31 +00003066 }
Sam Koltondfa29f72016-03-09 12:29:31 +00003067 }
Sam Koltona74cd522016-03-18 15:35:51 +00003068
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003069 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
Sam Koltondfa29f72016-03-09 12:29:31 +00003070 return MatchOperand_Success;
3071}
3072
Sam Kolton5f10a132016-05-06 11:31:17 +00003073AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003074 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003075}
3076
Sam Kolton5f10a132016-05-06 11:31:17 +00003077AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003078 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003079}
3080
Sam Kolton5f10a132016-05-06 11:31:17 +00003081AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003082 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00003083}
3084
3085void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00003086 OptionalImmIndexMap OptionalIdx;
3087
3088 unsigned I = 1;
3089 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
3090 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
3091 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
3092 }
3093
3094 for (unsigned E = Operands.size(); I != E; ++I) {
3095 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
3096 // Add the register arguments
Sam Koltona3ec5c12016-10-07 14:46:06 +00003097 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton945231a2016-06-10 09:57:59 +00003098 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00003099 } else if (Op.isDPPCtrl()) {
3100 Op.addImmOperands(Inst, 1);
3101 } else if (Op.isImm()) {
3102 // Handle optional arguments
3103 OptionalIdx[Op.getImmTy()] = I;
3104 } else {
3105 llvm_unreachable("Invalid operand type");
3106 }
3107 }
3108
Sam Koltondfa29f72016-03-09 12:29:31 +00003109 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
3110 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
3111 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Koltona3ec5c12016-10-07 14:46:06 +00003112
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003113 // special case v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00003114 // it has src2 register operand that is tied to dst operand
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003115 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
3116 Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003117 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003118 std::advance(
3119 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00003120 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
3121 }
Sam Koltondfa29f72016-03-09 12:29:31 +00003122}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00003123
Sam Kolton3025e7f2016-04-26 13:33:56 +00003124//===----------------------------------------------------------------------===//
3125// sdwa
3126//===----------------------------------------------------------------------===//
3127
Alex Bradbury58eba092016-11-01 16:32:05 +00003128OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003129AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
3130 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003131 using namespace llvm::AMDGPU::SDWA;
3132
Sam Kolton3025e7f2016-04-26 13:33:56 +00003133 SMLoc S = Parser.getTok().getLoc();
3134 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00003135 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003136
Sam Kolton05ef1c92016-06-03 10:27:37 +00003137 res = parseStringWithPrefix(Prefix, Value);
3138 if (res != MatchOperand_Success) {
3139 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00003140 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003141
Sam Kolton3025e7f2016-04-26 13:33:56 +00003142 int64_t Int;
3143 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00003144 .Case("BYTE_0", SdwaSel::BYTE_0)
3145 .Case("BYTE_1", SdwaSel::BYTE_1)
3146 .Case("BYTE_2", SdwaSel::BYTE_2)
3147 .Case("BYTE_3", SdwaSel::BYTE_3)
3148 .Case("WORD_0", SdwaSel::WORD_0)
3149 .Case("WORD_1", SdwaSel::WORD_1)
3150 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00003151 .Default(0xffffffff);
3152 Parser.Lex(); // eat last token
3153
3154 if (Int == 0xffffffff) {
3155 return MatchOperand_ParseFail;
3156 }
3157
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003158 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00003159 return MatchOperand_Success;
3160}
3161
Alex Bradbury58eba092016-11-01 16:32:05 +00003162OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00003163AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003164 using namespace llvm::AMDGPU::SDWA;
3165
Sam Kolton3025e7f2016-04-26 13:33:56 +00003166 SMLoc S = Parser.getTok().getLoc();
3167 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00003168 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00003169
3170 res = parseStringWithPrefix("dst_unused", Value);
3171 if (res != MatchOperand_Success) {
3172 return res;
3173 }
3174
3175 int64_t Int;
3176 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00003177 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
3178 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
3179 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00003180 .Default(0xffffffff);
3181 Parser.Lex(); // eat last token
3182
3183 if (Int == 0xffffffff) {
3184 return MatchOperand_ParseFail;
3185 }
3186
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003187 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00003188 return MatchOperand_Success;
3189}
3190
Sam Kolton945231a2016-06-10 09:57:59 +00003191void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00003192 cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
Sam Kolton05ef1c92016-06-03 10:27:37 +00003193}
3194
Sam Kolton945231a2016-06-10 09:57:59 +00003195void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
Sam Kolton5196b882016-07-01 09:59:21 +00003196 cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
3197}
3198
3199void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
3200 cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
Sam Kolton05ef1c92016-06-03 10:27:37 +00003201}
3202
/// Convert the parsed operand list of an SDWA instruction into \p Inst.
/// \p BasicInstType (one of SIInstrFlags::VOP1/VOP2/VOPC) selects which
/// optional sdwa operands (dst_sel, dst_unused, src0_sel, src1_sel) are
/// appended, each with the default encoding given by the literal argument.
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  // Maps an optional-immediate type to its index in Operands.
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  // Destination registers come first in the parsed operand list.
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (BasicInstType == SIInstrFlags::VOPC &&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC sdwa use "vcc" token as dst. Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source operand with input modifiers (two MCOperands).
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // clamp is accepted for all SDWA instruction classes; defaults to 0.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa) {
    // V_NOP_sdwa has no optional sdwa arguments
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      // VOP1: dst_sel, dst_unused, src0_sel (defaults 6, 2, 6 respectively).
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      break;

    case SIInstrFlags::VOP2:
      // VOP2: same as VOP1 plus src1_sel.
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, 2);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;

    case SIInstrFlags::VOPC:
      // VOPC: no dst selectors (dst is vcc), only the source selectors.
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, 6);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, 6);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }

}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00003270
/// Force static initialization: register this asm parser with both target
/// registry entries so `llvm-mc` and friends can find it.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
3276
3277#define GET_REGISTER_MATCHER
3278#define GET_MATCHER_IMPLEMENTATION
3279#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00003280
Sam Kolton11de3702016-05-24 12:38:33 +00003281// This fuction should be defined after auto-generated include so that we have
3282// MatchClassKind enum defined
3283unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
3284 unsigned Kind) {
3285 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00003286 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00003287 // operand. This method checks if we are given immediate operand but expect to
3288 // get corresponding token.
3289 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
3290 switch (Kind) {
3291 case MCK_addr64:
3292 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
3293 case MCK_gds:
3294 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
3295 case MCK_glc:
3296 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
3297 case MCK_idxen:
3298 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
3299 case MCK_offen:
3300 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003301 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00003302 // When operands have expression values, they will return true for isToken,
3303 // because it is not possible to distinguish between a token and an
3304 // expression at parse time. MatchInstructionImpl() will always try to
3305 // match an operand as a token, when isToken returns true, and when the
3306 // name of the expression is not a valid token, the match will fail,
3307 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003308 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
3309 case MCK_SSrcF32:
3310 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00003311 case MCK_SoppBrTarget:
3312 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003313 case MCK_VReg32OrOff:
3314 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
3315 default:
3316 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00003317 }
3318}