blob: ba16766abd4e6a298e0a608a830559c206422b04 [file] [log] [blame]
Sam Koltonf51f4b82016-03-04 12:29:14 +00001//===-- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ---------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00002//
3// The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000010#include "AMDKernelCodeT.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000011#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
Tom Stellard347ac792015-06-26 21:15:07 +000012#include "MCTargetDesc/AMDGPUTargetStreamer.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000013#include "SIDefines.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000014#include "Utils/AMDGPUBaseInfo.h"
Valery Pykhtindc110542016-03-06 20:25:36 +000015#include "Utils/AMDKernelCodeTUtils.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000016#include "Utils/AMDGPUAsmUtils.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000017#include "llvm/ADT/APFloat.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000018#include "llvm/ADT/APInt.h"
Eugene Zelenko66203762017-01-21 00:53:49 +000019#include "llvm/ADT/ArrayRef.h"
Sam Kolton5f10a132016-05-06 11:31:17 +000020#include "llvm/ADT/SmallBitVector.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000021#include "llvm/ADT/SmallString.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000022#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/StringRef.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000024#include "llvm/ADT/StringSwitch.h"
25#include "llvm/ADT/Twine.h"
Sam Kolton1eeb11b2016-09-09 14:44:04 +000026#include "llvm/CodeGen/MachineValueType.h"
Sam Kolton69c8aa22016-12-19 11:43:15 +000027#include "llvm/MC/MCAsmInfo.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000028#include "llvm/MC/MCContext.h"
29#include "llvm/MC/MCExpr.h"
30#include "llvm/MC/MCInst.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000031#include "llvm/MC/MCInstrDesc.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000032#include "llvm/MC/MCInstrInfo.h"
33#include "llvm/MC/MCParser/MCAsmLexer.h"
34#include "llvm/MC/MCParser/MCAsmParser.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000035#include "llvm/MC/MCParser/MCAsmParserExtension.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000036#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
Benjamin Kramerb3e8a6d2016-01-27 10:01:28 +000037#include "llvm/MC/MCParser/MCTargetAsmParser.h"
Tom Stellard45bb48e2015-06-13 03:28:10 +000038#include "llvm/MC/MCRegisterInfo.h"
39#include "llvm/MC/MCStreamer.h"
40#include "llvm/MC/MCSubtargetInfo.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000041#include "llvm/MC/MCSymbol.h"
42#include "llvm/Support/Casting.h"
Tom Stellard1e1b05d2015-11-06 11:45:14 +000043#include "llvm/Support/ELF.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000044#include "llvm/Support/ErrorHandling.h"
Artem Tamazov6edc1352016-05-26 17:00:33 +000045#include "llvm/Support/MathExtras.h"
Eugene Zelenko2bc2f332016-12-09 22:06:55 +000046#include "llvm/Support/raw_ostream.h"
47#include "llvm/Support/SMLoc.h"
48#include "llvm/Support/TargetRegistry.h"
49#include <algorithm>
50#include <cassert>
51#include <cstdint>
52#include <cstring>
53#include <iterator>
54#include <map>
55#include <memory>
56#include <string>
Artem Tamazovebe71ce2016-05-06 17:48:48 +000057
Tom Stellard45bb48e2015-06-13 03:28:10 +000058using namespace llvm;
Konstantin Zhuravlyov836cbff2016-09-30 17:01:40 +000059using namespace llvm::AMDGPU;
Tom Stellard45bb48e2015-06-13 03:28:10 +000060
61namespace {
62
Sam Kolton1eeb11b2016-09-09 14:44:04 +000063class AMDGPUAsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000064
Nikolay Haustovfb5c3072016-04-20 09:34:48 +000065enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };
66
Sam Kolton1eeb11b2016-09-09 14:44:04 +000067//===----------------------------------------------------------------------===//
68// Operand
69//===----------------------------------------------------------------------===//
70
Tom Stellard45bb48e2015-06-13 03:28:10 +000071class AMDGPUOperand : public MCParsedAsmOperand {
72 enum KindTy {
73 Token,
74 Immediate,
75 Register,
76 Expression
77 } Kind;
78
79 SMLoc StartLoc, EndLoc;
Sam Kolton1eeb11b2016-09-09 14:44:04 +000080 const AMDGPUAsmParser *AsmParser;
Tom Stellard45bb48e2015-06-13 03:28:10 +000081
82public:
Matt Arsenaultf15da6c2017-02-03 20:49:51 +000083 AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
Sam Kolton1eeb11b2016-09-09 14:44:04 +000084 : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}
Tom Stellard45bb48e2015-06-13 03:28:10 +000085
Sam Kolton5f10a132016-05-06 11:31:17 +000086 typedef std::unique_ptr<AMDGPUOperand> Ptr;
87
Sam Kolton945231a2016-06-10 09:57:59 +000088 struct Modifiers {
Matt Arsenaultb55f6202016-12-03 18:22:49 +000089 bool Abs = false;
90 bool Neg = false;
91 bool Sext = false;
Sam Kolton945231a2016-06-10 09:57:59 +000092
93 bool hasFPModifiers() const { return Abs || Neg; }
94 bool hasIntModifiers() const { return Sext; }
95 bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }
96
97 int64_t getFPModifiersOperand() const {
98 int64_t Operand = 0;
99 Operand |= Abs ? SISrcMods::ABS : 0;
100 Operand |= Neg ? SISrcMods::NEG : 0;
101 return Operand;
102 }
103
104 int64_t getIntModifiersOperand() const {
105 int64_t Operand = 0;
106 Operand |= Sext ? SISrcMods::SEXT : 0;
107 return Operand;
108 }
109
110 int64_t getModifiersOperand() const {
111 assert(!(hasFPModifiers() && hasIntModifiers())
112 && "fp and int modifiers should not be used simultaneously");
113 if (hasFPModifiers()) {
114 return getFPModifiersOperand();
115 } else if (hasIntModifiers()) {
116 return getIntModifiersOperand();
117 } else {
118 return 0;
119 }
120 }
121
122 friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
123 };
124
Tom Stellard45bb48e2015-06-13 03:28:10 +0000125 enum ImmTy {
126 ImmTyNone,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000127 ImmTyGDS,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000128 ImmTyOffen,
129 ImmTyIdxen,
130 ImmTyAddr64,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000131 ImmTyOffset,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000132 ImmTyOffset0,
133 ImmTyOffset1,
Tom Stellard45bb48e2015-06-13 03:28:10 +0000134 ImmTyGLC,
135 ImmTySLC,
136 ImmTyTFE,
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000137 ImmTyClampSI,
138 ImmTyOModSI,
Sam Koltondfa29f72016-03-09 12:29:31 +0000139 ImmTyDppCtrl,
140 ImmTyDppRowMask,
141 ImmTyDppBankMask,
142 ImmTyDppBoundCtrl,
Sam Kolton05ef1c92016-06-03 10:27:37 +0000143 ImmTySdwaDstSel,
144 ImmTySdwaSrc0Sel,
145 ImmTySdwaSrc1Sel,
Sam Kolton3025e7f2016-04-26 13:33:56 +0000146 ImmTySdwaDstUnused,
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000147 ImmTyDMask,
148 ImmTyUNorm,
149 ImmTyDA,
150 ImmTyR128,
151 ImmTyLWE,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000152 ImmTyExpTgt,
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000153 ImmTyExpCompr,
154 ImmTyExpVM,
Artem Tamazovd6468662016-04-25 14:13:51 +0000155 ImmTyHwreg,
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000156 ImmTyOff,
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000157 ImmTySendMsg,
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000158 ImmTyInterpSlot,
159 ImmTyInterpAttr,
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000160 ImmTyAttrChan,
161 ImmTyOpSel,
162 ImmTyOpSelHi,
163 ImmTyNegLo,
164 ImmTyNegHi
Tom Stellard45bb48e2015-06-13 03:28:10 +0000165 };
166
167 struct TokOp {
168 const char *Data;
169 unsigned Length;
170 };
171
172 struct ImmOp {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000173 int64_t Val;
Matt Arsenault7f192982016-08-16 20:28:06 +0000174 ImmTy Type;
175 bool IsFPImm;
Sam Kolton945231a2016-06-10 09:57:59 +0000176 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000177 };
178
179 struct RegOp {
Matt Arsenault7f192982016-08-16 20:28:06 +0000180 unsigned RegNo;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000181 bool IsForcedVOP3;
Matt Arsenault7f192982016-08-16 20:28:06 +0000182 Modifiers Mods;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000183 };
184
185 union {
186 TokOp Tok;
187 ImmOp Imm;
188 RegOp Reg;
189 const MCExpr *Expr;
190 };
191
Tom Stellard45bb48e2015-06-13 03:28:10 +0000192 bool isToken() const override {
Tom Stellard89049702016-06-15 02:54:14 +0000193 if (Kind == Token)
194 return true;
195
196 if (Kind != Expression || !Expr)
197 return false;
198
199 // When parsing operands, we can't always tell if something was meant to be
200 // a token, like 'gds', or an expression that references a global variable.
201 // In this case, we assume the string is an expression, and if we need to
202 // interpret is a token, then we treat the symbol name as the token.
203 return isa<MCSymbolRefExpr>(Expr);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000204 }
205
206 bool isImm() const override {
207 return Kind == Immediate;
208 }
209
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000210 bool isInlinableImm(MVT type) const;
211 bool isLiteralImm(MVT type) const;
Tom Stellard45bb48e2015-06-13 03:28:10 +0000212
Tom Stellard45bb48e2015-06-13 03:28:10 +0000213 bool isRegKind() const {
214 return Kind == Register;
215 }
216
217 bool isReg() const override {
Sam Kolton9772eb32017-01-11 11:46:30 +0000218 return isRegKind() && !hasModifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000219 }
220
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000221 bool isRegOrImmWithInputMods(MVT type) const {
222 return isRegKind() || isInlinableImm(type);
223 }
224
Matt Arsenault4bd72362016-12-10 00:39:12 +0000225 bool isRegOrImmWithInt16InputMods() const {
226 return isRegOrImmWithInputMods(MVT::i16);
227 }
228
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000229 bool isRegOrImmWithInt32InputMods() const {
230 return isRegOrImmWithInputMods(MVT::i32);
231 }
232
233 bool isRegOrImmWithInt64InputMods() const {
234 return isRegOrImmWithInputMods(MVT::i64);
235 }
236
Matt Arsenault4bd72362016-12-10 00:39:12 +0000237 bool isRegOrImmWithFP16InputMods() const {
238 return isRegOrImmWithInputMods(MVT::f16);
239 }
240
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000241 bool isRegOrImmWithFP32InputMods() const {
242 return isRegOrImmWithInputMods(MVT::f32);
243 }
244
245 bool isRegOrImmWithFP64InputMods() const {
246 return isRegOrImmWithInputMods(MVT::f64);
Tom Stellarda90b9522016-02-11 03:28:15 +0000247 }
248
Sam Kolton9772eb32017-01-11 11:46:30 +0000249 bool isVReg() const {
250 return isRegClass(AMDGPU::VGPR_32RegClassID) ||
251 isRegClass(AMDGPU::VReg_64RegClassID) ||
252 isRegClass(AMDGPU::VReg_96RegClassID) ||
253 isRegClass(AMDGPU::VReg_128RegClassID) ||
254 isRegClass(AMDGPU::VReg_256RegClassID) ||
255 isRegClass(AMDGPU::VReg_512RegClassID);
256 }
257
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000258 bool isVReg32OrOff() const {
259 return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
260 }
261
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000262 bool isImmTy(ImmTy ImmT) const {
263 return isImm() && Imm.Type == ImmT;
264 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000265
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000266 bool isImmModifier() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000267 return isImm() && Imm.Type != ImmTyNone;
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000268 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000269
Sam Kolton945231a2016-06-10 09:57:59 +0000270 bool isClampSI() const { return isImmTy(ImmTyClampSI); }
271 bool isOModSI() const { return isImmTy(ImmTyOModSI); }
272 bool isDMask() const { return isImmTy(ImmTyDMask); }
Nikolay Haustov2f684f12016-02-26 09:51:05 +0000273 bool isUNorm() const { return isImmTy(ImmTyUNorm); }
274 bool isDA() const { return isImmTy(ImmTyDA); }
275 bool isR128() const { return isImmTy(ImmTyUNorm); }
276 bool isLWE() const { return isImmTy(ImmTyLWE); }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000277 bool isOff() const { return isImmTy(ImmTyOff); }
278 bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000279 bool isExpVM() const { return isImmTy(ImmTyExpVM); }
280 bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000281 bool isOffen() const { return isImmTy(ImmTyOffen); }
282 bool isIdxen() const { return isImmTy(ImmTyIdxen); }
283 bool isAddr64() const { return isImmTy(ImmTyAddr64); }
284 bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
285 bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
286 bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }
Nikolay Haustovea8febd2016-03-01 08:34:43 +0000287 bool isGDS() const { return isImmTy(ImmTyGDS); }
288 bool isGLC() const { return isImmTy(ImmTyGLC); }
289 bool isSLC() const { return isImmTy(ImmTySLC); }
290 bool isTFE() const { return isImmTy(ImmTyTFE); }
Sam Kolton945231a2016-06-10 09:57:59 +0000291 bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
292 bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
293 bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
294 bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
295 bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
296 bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
297 bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000298 bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
299 bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
300 bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000301 bool isOpSel() const { return isImmTy(ImmTyOpSel); }
302 bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
303 bool isNegLo() const { return isImmTy(ImmTyNegLo); }
304 bool isNegHi() const { return isImmTy(ImmTyNegHi); }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000305
Sam Kolton945231a2016-06-10 09:57:59 +0000306 bool isMod() const {
307 return isClampSI() || isOModSI();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000308 }
309
310 bool isRegOrImm() const {
311 return isReg() || isImm();
312 }
313
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000314 bool isRegClass(unsigned RCID) const;
315
Sam Kolton9772eb32017-01-11 11:46:30 +0000316 bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
317 return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
318 }
319
Matt Arsenault4bd72362016-12-10 00:39:12 +0000320 bool isSCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000321 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000322 }
323
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000324 bool isSCSrcV2B16() const {
325 return isSCSrcB16();
326 }
327
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000328 bool isSCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000329 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000330 }
331
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000332 bool isSCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000333 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000334 }
335
Matt Arsenault4bd72362016-12-10 00:39:12 +0000336 bool isSCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000337 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000338 }
339
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000340 bool isSCSrcV2F16() const {
341 return isSCSrcF16();
342 }
343
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000344 bool isSCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000345 return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000346 }
347
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000348 bool isSCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000349 return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
Tom Stellardd93a34f2016-02-22 19:17:56 +0000350 }
351
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000352 bool isSSrcB32() const {
353 return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
354 }
355
Matt Arsenault4bd72362016-12-10 00:39:12 +0000356 bool isSSrcB16() const {
357 return isSCSrcB16() || isLiteralImm(MVT::i16);
358 }
359
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000360 bool isSSrcV2B16() const {
361 llvm_unreachable("cannot happen");
362 return isSSrcB16();
363 }
364
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000365 bool isSSrcB64() const {
Tom Stellardd93a34f2016-02-22 19:17:56 +0000366 // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
367 // See isVSrc64().
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000368 return isSCSrcB64() || isLiteralImm(MVT::i64);
Matt Arsenault86d336e2015-09-08 21:15:00 +0000369 }
370
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000371 bool isSSrcF32() const {
372 return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000373 }
374
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000375 bool isSSrcF64() const {
376 return isSCSrcB64() || isLiteralImm(MVT::f64);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000377 }
378
Matt Arsenault4bd72362016-12-10 00:39:12 +0000379 bool isSSrcF16() const {
380 return isSCSrcB16() || isLiteralImm(MVT::f16);
381 }
382
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000383 bool isSSrcV2F16() const {
384 llvm_unreachable("cannot happen");
385 return isSSrcF16();
386 }
387
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000388 bool isVCSrcB32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000389 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000390 }
391
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000392 bool isVCSrcB64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000393 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000394 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000395
Matt Arsenault4bd72362016-12-10 00:39:12 +0000396 bool isVCSrcB16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000397 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000398 }
399
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000400 bool isVCSrcV2B16() const {
401 return isVCSrcB16();
402 }
403
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000404 bool isVCSrcF32() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000405 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000406 }
407
408 bool isVCSrcF64() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000409 return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000410 }
411
Matt Arsenault4bd72362016-12-10 00:39:12 +0000412 bool isVCSrcF16() const {
Sam Kolton9772eb32017-01-11 11:46:30 +0000413 return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
Matt Arsenault4bd72362016-12-10 00:39:12 +0000414 }
415
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000416 bool isVCSrcV2F16() const {
417 return isVCSrcF16();
418 }
419
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000420 bool isVSrcB32() const {
421 return isVCSrcF32() || isLiteralImm(MVT::i32);
422 }
423
424 bool isVSrcB64() const {
425 return isVCSrcF64() || isLiteralImm(MVT::i64);
426 }
427
Matt Arsenault4bd72362016-12-10 00:39:12 +0000428 bool isVSrcB16() const {
429 return isVCSrcF16() || isLiteralImm(MVT::i16);
430 }
431
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000432 bool isVSrcV2B16() const {
433 llvm_unreachable("cannot happen");
434 return isVSrcB16();
435 }
436
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000437 bool isVSrcF32() const {
438 return isVCSrcF32() || isLiteralImm(MVT::f32);
439 }
440
441 bool isVSrcF64() const {
442 return isVCSrcF64() || isLiteralImm(MVT::f64);
443 }
444
Matt Arsenault4bd72362016-12-10 00:39:12 +0000445 bool isVSrcF16() const {
446 return isVCSrcF16() || isLiteralImm(MVT::f16);
447 }
448
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000449 bool isVSrcV2F16() const {
450 llvm_unreachable("cannot happen");
451 return isVSrcF16();
452 }
453
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000454 bool isKImmFP32() const {
455 return isLiteralImm(MVT::f32);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000456 }
457
Matt Arsenault4bd72362016-12-10 00:39:12 +0000458 bool isKImmFP16() const {
459 return isLiteralImm(MVT::f16);
460 }
461
Tom Stellard45bb48e2015-06-13 03:28:10 +0000462 bool isMem() const override {
463 return false;
464 }
465
466 bool isExpr() const {
467 return Kind == Expression;
468 }
469
470 bool isSoppBrTarget() const {
471 return isExpr() || isImm();
472 }
473
Sam Kolton945231a2016-06-10 09:57:59 +0000474 bool isSWaitCnt() const;
475 bool isHwreg() const;
476 bool isSendMsg() const;
Artem Tamazov54bfd542016-10-31 16:07:39 +0000477 bool isSMRDOffset8() const;
478 bool isSMRDOffset20() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000479 bool isSMRDLiteralOffset() const;
480 bool isDPPCtrl() const;
Matt Arsenaultcc88ce32016-10-12 18:00:51 +0000481 bool isGPRIdxMode() const;
Sam Kolton945231a2016-06-10 09:57:59 +0000482
Tom Stellard89049702016-06-15 02:54:14 +0000483 StringRef getExpressionAsToken() const {
484 assert(isExpr());
485 const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
486 return S->getSymbol().getName();
487 }
488
Sam Kolton945231a2016-06-10 09:57:59 +0000489 StringRef getToken() const {
Tom Stellard89049702016-06-15 02:54:14 +0000490 assert(isToken());
491
492 if (Kind == Expression)
493 return getExpressionAsToken();
494
Sam Kolton945231a2016-06-10 09:57:59 +0000495 return StringRef(Tok.Data, Tok.Length);
496 }
497
498 int64_t getImm() const {
499 assert(isImm());
500 return Imm.Val;
501 }
502
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000503 ImmTy getImmTy() const {
Sam Kolton945231a2016-06-10 09:57:59 +0000504 assert(isImm());
505 return Imm.Type;
506 }
507
508 unsigned getReg() const override {
509 return Reg.RegNo;
510 }
511
Tom Stellard45bb48e2015-06-13 03:28:10 +0000512 SMLoc getStartLoc() const override {
513 return StartLoc;
514 }
515
Peter Collingbourne0da86302016-10-10 22:49:37 +0000516 SMLoc getEndLoc() const override {
Tom Stellard45bb48e2015-06-13 03:28:10 +0000517 return EndLoc;
518 }
519
Sam Kolton945231a2016-06-10 09:57:59 +0000520 Modifiers getModifiers() const {
521 assert(isRegKind() || isImmTy(ImmTyNone));
522 return isRegKind() ? Reg.Mods : Imm.Mods;
523 }
524
525 void setModifiers(Modifiers Mods) {
526 assert(isRegKind() || isImmTy(ImmTyNone));
527 if (isRegKind())
528 Reg.Mods = Mods;
529 else
530 Imm.Mods = Mods;
531 }
532
533 bool hasModifiers() const {
534 return getModifiers().hasModifiers();
535 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +0000536
Sam Kolton945231a2016-06-10 09:57:59 +0000537 bool hasFPModifiers() const {
538 return getModifiers().hasFPModifiers();
539 }
540
541 bool hasIntModifiers() const {
542 return getModifiers().hasIntModifiers();
543 }
544
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000545 uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;
546
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000547 void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000548
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +0000549 void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000550
Matt Arsenault4bd72362016-12-10 00:39:12 +0000551 template <unsigned Bitwidth>
552 void addKImmFPOperands(MCInst &Inst, unsigned N) const;
553
554 void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
555 addKImmFPOperands<16>(Inst, N);
556 }
557
558 void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
559 addKImmFPOperands<32>(Inst, N);
560 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000561
562 void addRegOperands(MCInst &Inst, unsigned N) const;
Sam Kolton945231a2016-06-10 09:57:59 +0000563
564 void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
565 if (isRegKind())
566 addRegOperands(Inst, N);
Tom Stellard89049702016-06-15 02:54:14 +0000567 else if (isExpr())
568 Inst.addOperand(MCOperand::createExpr(Expr));
Sam Kolton945231a2016-06-10 09:57:59 +0000569 else
570 addImmOperands(Inst, N);
571 }
572
573 void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
574 Modifiers Mods = getModifiers();
575 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
576 if (isRegKind()) {
577 addRegOperands(Inst, N);
578 } else {
579 addImmOperands(Inst, N, false);
580 }
581 }
582
583 void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
584 assert(!hasIntModifiers());
585 addRegOrImmWithInputModsOperands(Inst, N);
586 }
587
588 void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
589 assert(!hasFPModifiers());
590 addRegOrImmWithInputModsOperands(Inst, N);
591 }
592
Sam Kolton9772eb32017-01-11 11:46:30 +0000593 void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
594 Modifiers Mods = getModifiers();
595 Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
596 assert(isRegKind());
597 addRegOperands(Inst, N);
598 }
599
600 void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
601 assert(!hasIntModifiers());
602 addRegWithInputModsOperands(Inst, N);
603 }
604
605 void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
606 assert(!hasFPModifiers());
607 addRegWithInputModsOperands(Inst, N);
608 }
609
Sam Kolton945231a2016-06-10 09:57:59 +0000610 void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
611 if (isImm())
612 addImmOperands(Inst, N);
613 else {
614 assert(isExpr());
615 Inst.addOperand(MCOperand::createExpr(Expr));
616 }
617 }
618
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000619 static void printImmTy(raw_ostream& OS, ImmTy Type) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000620 switch (Type) {
621 case ImmTyNone: OS << "None"; break;
622 case ImmTyGDS: OS << "GDS"; break;
623 case ImmTyOffen: OS << "Offen"; break;
624 case ImmTyIdxen: OS << "Idxen"; break;
625 case ImmTyAddr64: OS << "Addr64"; break;
626 case ImmTyOffset: OS << "Offset"; break;
627 case ImmTyOffset0: OS << "Offset0"; break;
628 case ImmTyOffset1: OS << "Offset1"; break;
629 case ImmTyGLC: OS << "GLC"; break;
630 case ImmTySLC: OS << "SLC"; break;
631 case ImmTyTFE: OS << "TFE"; break;
632 case ImmTyClampSI: OS << "ClampSI"; break;
633 case ImmTyOModSI: OS << "OModSI"; break;
634 case ImmTyDppCtrl: OS << "DppCtrl"; break;
635 case ImmTyDppRowMask: OS << "DppRowMask"; break;
636 case ImmTyDppBankMask: OS << "DppBankMask"; break;
637 case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
Sam Kolton05ef1c92016-06-03 10:27:37 +0000638 case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
639 case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
640 case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000641 case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
642 case ImmTyDMask: OS << "DMask"; break;
643 case ImmTyUNorm: OS << "UNorm"; break;
644 case ImmTyDA: OS << "DA"; break;
645 case ImmTyR128: OS << "R128"; break;
646 case ImmTyLWE: OS << "LWE"; break;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +0000647 case ImmTyOff: OS << "Off"; break;
648 case ImmTyExpTgt: OS << "ExpTgt"; break;
Matt Arsenault8a63cb92016-12-05 20:31:49 +0000649 case ImmTyExpCompr: OS << "ExpCompr"; break;
650 case ImmTyExpVM: OS << "ExpVM"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000651 case ImmTyHwreg: OS << "Hwreg"; break;
Artem Tamazovebe71ce2016-05-06 17:48:48 +0000652 case ImmTySendMsg: OS << "SendMsg"; break;
Matt Arsenault0e8a2992016-12-15 20:40:20 +0000653 case ImmTyInterpSlot: OS << "InterpSlot"; break;
654 case ImmTyInterpAttr: OS << "InterpAttr"; break;
655 case ImmTyAttrChan: OS << "AttrChan"; break;
Matt Arsenault9be7b0d2017-02-27 18:49:11 +0000656 case ImmTyOpSel: OS << "OpSel"; break;
657 case ImmTyOpSelHi: OS << "OpSelHi"; break;
658 case ImmTyNegLo: OS << "NegLo"; break;
659 case ImmTyNegHi: OS << "NegHi"; break;
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000660 }
661 }
662
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000663 void print(raw_ostream &OS) const override {
664 switch (Kind) {
665 case Register:
Sam Kolton945231a2016-06-10 09:57:59 +0000666 OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000667 break;
668 case Immediate:
Nikolay Haustov4f672a32016-04-29 09:02:30 +0000669 OS << '<' << getImm();
670 if (getImmTy() != ImmTyNone) {
671 OS << " type: "; printImmTy(OS, getImmTy());
672 }
Sam Kolton945231a2016-06-10 09:57:59 +0000673 OS << " mods: " << Imm.Mods << '>';
Matt Arsenaultcbd75372015-08-08 00:41:51 +0000674 break;
675 case Token:
676 OS << '\'' << getToken() << '\'';
677 break;
678 case Expression:
679 OS << "<expr " << *Expr << '>';
680 break;
681 }
682 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000683
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000684 static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
685 int64_t Val, SMLoc Loc,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +0000686 ImmTy Type = ImmTyNone,
Sam Kolton5f10a132016-05-06 11:31:17 +0000687 bool IsFPImm = false) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000688 auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000689 Op->Imm.Val = Val;
690 Op->Imm.IsFPImm = IsFPImm;
691 Op->Imm.Type = Type;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000692 Op->Imm.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000693 Op->StartLoc = Loc;
694 Op->EndLoc = Loc;
695 return Op;
696 }
697
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000698 static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
699 StringRef Str, SMLoc Loc,
Sam Kolton5f10a132016-05-06 11:31:17 +0000700 bool HasExplicitEncodingSize = true) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000701 auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000702 Res->Tok.Data = Str.data();
703 Res->Tok.Length = Str.size();
704 Res->StartLoc = Loc;
705 Res->EndLoc = Loc;
706 return Res;
707 }
708
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000709 static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
710 unsigned RegNo, SMLoc S,
Sam Kolton5f10a132016-05-06 11:31:17 +0000711 SMLoc E,
Sam Kolton5f10a132016-05-06 11:31:17 +0000712 bool ForceVOP3) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000713 auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000714 Op->Reg.RegNo = RegNo;
Matt Arsenaultb55f6202016-12-03 18:22:49 +0000715 Op->Reg.Mods = Modifiers();
Tom Stellard45bb48e2015-06-13 03:28:10 +0000716 Op->Reg.IsForcedVOP3 = ForceVOP3;
717 Op->StartLoc = S;
718 Op->EndLoc = E;
719 return Op;
720 }
721
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000722 static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
723 const class MCExpr *Expr, SMLoc S) {
724 auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
Tom Stellard45bb48e2015-06-13 03:28:10 +0000725 Op->Expr = Expr;
726 Op->StartLoc = S;
727 Op->EndLoc = S;
728 return Op;
729 }
Tom Stellard45bb48e2015-06-13 03:28:10 +0000730};
731
Sam Kolton945231a2016-06-10 09:57:59 +0000732raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
733 OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
734 return OS;
735}
736
Sam Kolton1eeb11b2016-09-09 14:44:04 +0000737//===----------------------------------------------------------------------===//
738// AsmParser
739//===----------------------------------------------------------------------===//
740
Artem Tamazova01cce82016-12-27 16:00:11 +0000741// Holds info related to the current kernel, e.g. count of SGPRs used.
742// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
743// .amdgpu_hsa_kernel or at EOF.
744class KernelScopeInfo {
Eugene Zelenko66203762017-01-21 00:53:49 +0000745 int SgprIndexUnusedMin = -1;
746 int VgprIndexUnusedMin = -1;
747 MCContext *Ctx = nullptr;
Artem Tamazova01cce82016-12-27 16:00:11 +0000748
749 void usesSgprAt(int i) {
750 if (i >= SgprIndexUnusedMin) {
751 SgprIndexUnusedMin = ++i;
752 if (Ctx) {
753 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
754 Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
755 }
756 }
757 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000758
Artem Tamazova01cce82016-12-27 16:00:11 +0000759 void usesVgprAt(int i) {
760 if (i >= VgprIndexUnusedMin) {
761 VgprIndexUnusedMin = ++i;
762 if (Ctx) {
763 MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
764 Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
765 }
766 }
767 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000768
Artem Tamazova01cce82016-12-27 16:00:11 +0000769public:
Eugene Zelenko66203762017-01-21 00:53:49 +0000770 KernelScopeInfo() = default;
771
Artem Tamazova01cce82016-12-27 16:00:11 +0000772 void initialize(MCContext &Context) {
773 Ctx = &Context;
774 usesSgprAt(SgprIndexUnusedMin = -1);
775 usesVgprAt(VgprIndexUnusedMin = -1);
776 }
Eugene Zelenko66203762017-01-21 00:53:49 +0000777
Artem Tamazova01cce82016-12-27 16:00:11 +0000778 void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
779 switch (RegKind) {
780 case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
781 case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
782 default: break;
783 }
784 }
785};
786
/// Target assembly parser for AMDGPU (SI/CI/VI). Parses instructions and
/// target directives (HSA code object, amd_kernel_code_t, sections, ...) and
/// converts parsed operand lists into MCInsts for the various encoding
/// families (VOP3, DPP, SDWA, MUBUF, MIMG, DS, ...).
class AMDGPUAsmParser : public MCTargetAsmParser {
  const MCInstrInfo &MII;
  MCAsmParser &Parser;

  // Encoding selection forced by a mnemonic suffix (_e32/_e64/_dpp/_sdwa).
  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  // Per-kernel register usage tracking (publishes .kernel.*_count symbols).
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseDirectiveRuntimeMetadata();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool ParseSectionDirectiveHSAText();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();
  bool ParseDirectiveAMDGPUHsaModuleGlobal();
  bool ParseDirectiveAMDGPUHsaProgramGlobal();
  bool ParseSectionDirectiveHSADataGlobalAgent();
  bool ParseSectionDirectiveHSADataGlobalProgram();
  bool ParseSectionDirectiveHSARodataReadonlyAgent();
  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  // Shared MCInst conversion for the MUBUF and DS operand layouts.
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  // Maps an optional-immediate kind to its operand index within the parsed
  // operand vector.
  typedef std::map<AMDGPUOperand::ImmTy, unsigned> OptionalImmIndexMap;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI), MII(MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is none suitable machinery in the core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for specific target.
      AMDGPU::IsaInfo::IsaVersion ISA =
          AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
      MCContext &Ctx = getContext();
      MCSymbol *Sym =
          Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
      Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
      Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
    }
    KernelScope.initialize(getContext());
  }

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  // Setters/getters for the encoding forced by a mnemonic suffix.
  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
  OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);

  // Operand-vector to MCInst converters for DS and EXP encodings.
  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  // Parse result for one s_sendmsg/s_sethwreg field: its numeric id and
  // whether it was written symbolically.
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic;
    OperandInfoTy(int64_t Id_) : Id(Id_), IsSymbolic(false) { }
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  // Post-match checks (e.g. constant-bus usage) on the selected MCInst.
  bool validateOperandLimitations(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;
  bool isSGPR(unsigned Reg);

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  // MUBUF converters and defaults for omitted optional operands.
  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;
  AMDGPUOperand::Ptr defaultTFE() const;

  AMDGPUOperand::Ptr defaultDMask() const;
  AMDGPUOperand::Ptr defaultUNorm() const;
  AMDGPUOperand::Ptr defaultDA() const;
  AMDGPUOperand::Ptr defaultR128() const;
  AMDGPUOperand::Ptr defaultLWE() const;
  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtId(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Impl(MCInst &Inst,
                   const OperandVector &Operands,
                   OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType);
};
1046
// Table entry describing an optional (defaultable) instruction operand.
struct OptionalOperand {
  const char *Name;                 // operand keyword as written in assembly
  AMDGPUOperand::ImmTy Type;        // immediate kind produced when present
  bool IsBit;                       // true if a bare flag with no value
  bool (*ConvertResult)(int64_t&);  // optional post-parse value adjustment
};
1053
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00001054} // end anonymous namespace
1055
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001056// May be called with integer type with equivalent bitwidth.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001057static const fltSemantics *getFltSemantics(unsigned Size) {
1058 switch (Size) {
1059 case 4:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001060 return &APFloat::IEEEsingle();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001061 case 8:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001062 return &APFloat::IEEEdouble();
Matt Arsenault4bd72362016-12-10 00:39:12 +00001063 case 2:
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001064 return &APFloat::IEEEhalf();
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001065 default:
1066 llvm_unreachable("unsupported fp type");
1067 }
1068}
1069
Matt Arsenault4bd72362016-12-10 00:39:12 +00001070static const fltSemantics *getFltSemantics(MVT VT) {
1071 return getFltSemantics(VT.getSizeInBits() / 8);
1072}
1073
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001074static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1075 switch (OperandType) {
1076 case AMDGPU::OPERAND_REG_IMM_INT32:
1077 case AMDGPU::OPERAND_REG_IMM_FP32:
1078 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1079 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1080 return &APFloat::IEEEsingle();
1081 case AMDGPU::OPERAND_REG_IMM_INT64:
1082 case AMDGPU::OPERAND_REG_IMM_FP64:
1083 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1084 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1085 return &APFloat::IEEEdouble();
1086 case AMDGPU::OPERAND_REG_IMM_INT16:
1087 case AMDGPU::OPERAND_REG_IMM_FP16:
1088 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1089 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1090 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1091 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1092 return &APFloat::IEEEhalf();
1093 default:
1094 llvm_unreachable("unsupported fp type");
1095 }
1096}
1097
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001098//===----------------------------------------------------------------------===//
1099// Operand
1100//===----------------------------------------------------------------------===//
1101
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001102static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1103 bool Lost;
1104
1105 // Convert literal to single precision
1106 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1107 APFloat::rmNearestTiesToEven,
1108 &Lost);
1109 // We allow precision lost but not overflow or underflow
1110 if (Status != APFloat::opOK &&
1111 Lost &&
1112 ((Status & APFloat::opOverflow) != 0 ||
1113 (Status & APFloat::opUnderflow) != 0)) {
1114 return false;
1115 }
1116
1117 return true;
1118}
1119
/// Predicate: can this immediate be encoded as a hardware inline constant
/// for an operand of machine value type \p type? Handles both tokens parsed
/// as floating-point literals and as integer literals, for 64/32/16-bit
/// (and packed 16-bit) operands.
bool AMDGPUOperand::isInlinableImm(MVT type) const {
  if (!isImmTy(ImmTyNone)) {
    // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
    return false;
  }
  // TODO: We should avoid using host float here. It would be better to
  // check the float bit values which is what a few other places do.
  // We've had bot failures before due to weird NaN support on mips hosts.

  APInt Literal(64, Imm.Val);

  if (Imm.IsFPImm) { // We got fp literal token
    if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
      // 64-bit operands use the raw double bits directly.
      return AMDGPU::isInlinableLiteral64(Imm.Val,
                                          AsmParser->hasInv2PiInlineImm());
    }

    // Narrower operands: first require a lossless-enough conversion to the
    // target format, then test the converted bit pattern.
    APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
    if (!canLosslesslyConvertToFPType(FPLiteral, type))
      return false;

    if (type.getScalarSizeInBits() == 16) {
      return AMDGPU::isInlinableLiteral16(
        static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
        AsmParser->hasInv2PiInlineImm());
    }

    // Check if single precision literal is inlinable
    return AMDGPU::isInlinableLiteral32(
      static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // We got int literal token.
  if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
    return AMDGPU::isInlinableLiteral64(Imm.Val,
                                        AsmParser->hasInv2PiInlineImm());
  }

  // 16-bit operands: test the sign-extended low 16 bits of the literal.
  if (type.getScalarSizeInBits() == 16) {
    return AMDGPU::isInlinableLiteral16(
      static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
      AsmParser->hasInv2PiInlineImm());
  }

  // 32-bit operands: test the low 32 bits of the literal.
  return AMDGPU::isInlinableLiteral32(
    static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
    AsmParser->hasInv2PiInlineImm());
}
1169
1170bool AMDGPUOperand::isLiteralImm(MVT type) const {
1171 // Check that this imediate can be added as literal
1172 if (!isImmTy(ImmTyNone)) {
1173 return false;
1174 }
1175
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001176 if (!Imm.IsFPImm) {
1177 // We got int literal token.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001178
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001179 if (type == MVT::f64 && hasFPModifiers()) {
1180 // Cannot apply fp modifiers to int literals preserving the same semantics
1181 // for VOP1/2/C and VOP3 because of integer truncation. To avoid ambiguity,
1182 // disable these cases.
1183 return false;
1184 }
1185
Matt Arsenault4bd72362016-12-10 00:39:12 +00001186 unsigned Size = type.getSizeInBits();
1187 if (Size == 64)
1188 Size = 32;
1189
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001190 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
1191 // types.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001192 return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001193 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001194
1195 // We got fp literal token
1196 if (type == MVT::f64) { // Expected 64-bit fp operand
1197 // We would set low 64-bits of literal to zeroes but we accept this literals
1198 return true;
1199 }
1200
1201 if (type == MVT::i64) { // Expected 64-bit int operand
1202 // We don't allow fp literals in 64-bit integer instructions. It is
1203 // unclear how we should encode them.
1204 return false;
1205 }
1206
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001207 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001208 return canLosslesslyConvertToFPType(FPLiteral, type);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001209}
1210
1211bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001212 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001213}
1214
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001215uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1216{
1217 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1218 assert(Size == 2 || Size == 4 || Size == 8);
1219
1220 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1221
1222 if (Imm.Mods.Abs) {
1223 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001224 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001225 if (Imm.Mods.Neg) {
1226 Val ^= FpSignMask;
1227 }
1228
1229 return Val;
1230}
1231
1232void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001233
Matt Arsenault4bd72362016-12-10 00:39:12 +00001234 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1235 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001236 addLiteralImmOperand(Inst, Imm.Val,
1237 ApplyModifiers &
1238 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001239 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001240 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1241 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001242 }
1243}
1244
/// Encode immediate \p Val into \p Inst for the next operand slot, which must
/// accept SI literal/inline encodings. If \p ApplyModifiers is set, parsed
/// abs/neg fp modifiers are folded into the bit pattern first. FP-parsed
/// tokens are converted to the operand's format; integer-parsed tokens are
/// truncated to the operand width (inline-eligible values pass unchanged).
void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
  const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
  auto OpNum = Inst.getNumOperands();
  // Check that this operand accepts literals
  assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));

  if (ApplyModifiers) {
    assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
    // fp-parsed tokens always carry 64-bit (double) patterns; int-parsed
    // tokens use the operand's own width.
    const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
    Val = applyInputFPModifiers(Val, Size);
  }

  APInt Literal(64, Val);
  uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;

  if (Imm.IsFPImm) { // We got fp literal token
    switch (OpTy) {
    case AMDGPU::OPERAND_REG_IMM_INT64:
    case AMDGPU::OPERAND_REG_IMM_FP64:
    case AMDGPU::OPERAND_REG_INLINE_C_INT64:
    case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
      // Inline-eligible doubles are emitted with their full bit pattern.
      if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
                                       AsmParser->hasInv2PiInlineImm())) {
        Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
        return;
      }

      // Non-inlineable
      if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
        // For fp operands we check if low 32 bits are zeros
        if (Literal.getLoBits(32) != 0) {
          const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
          "Can't encode literal as exact 64-bit floating-point operand. "
          "Low 32-bits will be set to zero");
        }

        // Only the high dword is encodable; hardware zero-fills the rest.
        Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
        return;
      }

      // We don't allow fp literals in 64-bit integer instructions. It is
      // unclear how we should encode them. This case should be checked earlier
      // in predicate methods (isLiteralImm())
      llvm_unreachable("fp literal in 64-bit integer instruction.");
    }
    case AMDGPU::OPERAND_REG_IMM_INT32:
    case AMDGPU::OPERAND_REG_IMM_FP32:
    case AMDGPU::OPERAND_REG_INLINE_C_INT32:
    case AMDGPU::OPERAND_REG_INLINE_C_FP32:
    case AMDGPU::OPERAND_REG_IMM_INT16:
    case AMDGPU::OPERAND_REG_IMM_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_FP16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
    case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
      bool lost;
      APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
      // Convert literal to single precision
      FPLiteral.convert(*getOpFltSemantics(OpTy),
                        APFloat::rmNearestTiesToEven, &lost);
      // We allow precision lost but not overflow or underflow. This should be
      // checked earlier in isLiteralImm()

      uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
      // Packed 16-bit operands replicate the half pattern into both lanes.
      if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
          OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
        ImmVal |= (ImmVal << 16);
      }

      Inst.addOperand(MCOperand::createImm(ImmVal));
      return;
    }
    default:
      llvm_unreachable("invalid operand size");
    }

    return;
  }

  // We got int literal token.
  // Only sign extend inline immediates.
  // FIXME: No errors on truncation
  switch (OpTy) {
  case AMDGPU::OPERAND_REG_IMM_INT32:
  case AMDGPU::OPERAND_REG_IMM_FP32:
  case AMDGPU::OPERAND_REG_INLINE_C_INT32:
  case AMDGPU::OPERAND_REG_INLINE_C_FP32: {
    if (isInt<32>(Val) &&
        AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
    return;
  }
  case AMDGPU::OPERAND_REG_IMM_INT64:
  case AMDGPU::OPERAND_REG_IMM_FP64:
  case AMDGPU::OPERAND_REG_INLINE_C_INT64:
  case AMDGPU::OPERAND_REG_INLINE_C_FP64: {
    if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
    return;
  }
  case AMDGPU::OPERAND_REG_IMM_INT16:
  case AMDGPU::OPERAND_REG_IMM_FP16:
  case AMDGPU::OPERAND_REG_INLINE_C_INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_FP16: {
    if (isInt<16>(Val) &&
        AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
                                     AsmParser->hasInv2PiInlineImm())) {
      Inst.addOperand(MCOperand::createImm(Val));
      return;
    }

    Inst.addOperand(MCOperand::createImm(Val & 0xffff));
    return;
  }
  case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
  case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
    auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
    assert(AMDGPU::isInlinableLiteral16(LiteralVal,
                                        AsmParser->hasInv2PiInlineImm()));

    // Replicate the 16-bit pattern into both halves of the packed operand.
    uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
                      static_cast<uint32_t>(LiteralVal);
    Inst.addOperand(MCOperand::createImm(ImmVal));
    return;
  }
  default:
    llvm_unreachable("invalid operand size");
  }
}
1383
Matt Arsenault4bd72362016-12-10 00:39:12 +00001384template <unsigned Bitwidth>
1385void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001386 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001387
1388 if (!Imm.IsFPImm) {
1389 // We got int literal token.
1390 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1391 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001392 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001393
1394 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001395 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001396 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1397 APFloat::rmNearestTiesToEven, &Lost);
1398 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001399}
1400
1401void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1402 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1403}
1404
1405//===----------------------------------------------------------------------===//
1406// AsmParser
1407//===----------------------------------------------------------------------===//
1408
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001409static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1410 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001411 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001412 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001413 case 1: return AMDGPU::VGPR_32RegClassID;
1414 case 2: return AMDGPU::VReg_64RegClassID;
1415 case 3: return AMDGPU::VReg_96RegClassID;
1416 case 4: return AMDGPU::VReg_128RegClassID;
1417 case 8: return AMDGPU::VReg_256RegClassID;
1418 case 16: return AMDGPU::VReg_512RegClassID;
1419 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001420 } else if (Is == IS_TTMP) {
1421 switch (RegWidth) {
1422 default: return -1;
1423 case 1: return AMDGPU::TTMP_32RegClassID;
1424 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001425 case 4: return AMDGPU::TTMP_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001426 }
1427 } else if (Is == IS_SGPR) {
1428 switch (RegWidth) {
1429 default: return -1;
1430 case 1: return AMDGPU::SGPR_32RegClassID;
1431 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001432 case 4: return AMDGPU::SGPR_128RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001433 case 8: return AMDGPU::SReg_256RegClassID;
1434 case 16: return AMDGPU::SReg_512RegClassID;
1435 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001436 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001437 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001438}
1439
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001440static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001441 return StringSwitch<unsigned>(RegName)
1442 .Case("exec", AMDGPU::EXEC)
1443 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001444 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001445 .Case("m0", AMDGPU::M0)
1446 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001447 .Case("tba", AMDGPU::TBA)
1448 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001449 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1450 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001451 .Case("vcc_lo", AMDGPU::VCC_LO)
1452 .Case("vcc_hi", AMDGPU::VCC_HI)
1453 .Case("exec_lo", AMDGPU::EXEC_LO)
1454 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001455 .Case("tma_lo", AMDGPU::TMA_LO)
1456 .Case("tma_hi", AMDGPU::TMA_HI)
1457 .Case("tba_lo", AMDGPU::TBA_LO)
1458 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001459 .Default(0);
1460}
1461
Eugene Zelenko66203762017-01-21 00:53:49 +00001462bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1463 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001464 auto R = parseRegister();
1465 if (!R) return true;
1466 assert(R->isReg());
1467 RegNo = R->getReg();
1468 StartLoc = R->getStartLoc();
1469 EndLoc = R->getEndLoc();
1470 return false;
1471}
1472
Eugene Zelenko66203762017-01-21 00:53:49 +00001473bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1474 RegisterKind RegKind, unsigned Reg1,
1475 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001476 switch (RegKind) {
1477 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001478 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1479 Reg = AMDGPU::EXEC;
1480 RegWidth = 2;
1481 return true;
1482 }
1483 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1484 Reg = AMDGPU::FLAT_SCR;
1485 RegWidth = 2;
1486 return true;
1487 }
1488 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1489 Reg = AMDGPU::VCC;
1490 RegWidth = 2;
1491 return true;
1492 }
1493 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1494 Reg = AMDGPU::TBA;
1495 RegWidth = 2;
1496 return true;
1497 }
1498 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1499 Reg = AMDGPU::TMA;
1500 RegWidth = 2;
1501 return true;
1502 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001503 return false;
1504 case IS_VGPR:
1505 case IS_SGPR:
1506 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001507 if (Reg1 != Reg + RegWidth) {
1508 return false;
1509 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001510 RegWidth++;
1511 return true;
1512 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001513 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001514 }
1515}
1516
// Parse one register reference at the current lexer position. Accepts:
//  - special names resolved by getSpecialRegForName() (vcc, exec, m0, ...),
//  - single registers: vN / sN / ttmpN,
//  - ranges: v[LO:HI] (":HI" optional, v[N] means a single register),
//  - bracketed lists of consecutive single registers: [s0,s1,s2,s3].
// On success fills RegKind/Reg/RegNum/RegWidth (width in dwords) and, when
// DwordRegIndex is non-null, the dword-aligned starting index; returns false
// on any syntax or validity error.
bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
                                          unsigned &RegNum, unsigned &RegWidth,
                                          unsigned *DwordRegIndex) {
  if (DwordRegIndex) { *DwordRegIndex = 0; }
  const MCRegisterInfo *TRI = getContext().getRegisterInfo();
  if (getLexer().is(AsmToken::Identifier)) {
    StringRef RegName = Parser.getTok().getString();
    if ((Reg = getSpecialRegForName(RegName))) {
      Parser.Lex();
      RegKind = IS_SPECIAL;
    } else {
      // Determine the register file from the name prefix; the rest of the
      // identifier (if any) is the register number.
      unsigned RegNumIndex = 0;
      if (RegName[0] == 'v') {
        RegNumIndex = 1;
        RegKind = IS_VGPR;
      } else if (RegName[0] == 's') {
        RegNumIndex = 1;
        RegKind = IS_SGPR;
      } else if (RegName.startswith("ttmp")) {
        RegNumIndex = strlen("ttmp");
        RegKind = IS_TTMP;
      } else {
        return false;
      }
      if (RegName.size() > RegNumIndex) {
        // Single 32-bit register: vXX.
        if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
          return false;
        Parser.Lex();
        RegWidth = 1;
      } else {
        // Range of registers: v[XX:YY]. ":YY" is optional.
        Parser.Lex();
        int64_t RegLo, RegHi;
        if (getLexer().isNot(AsmToken::LBrac))
          return false;
        Parser.Lex();

        if (getParser().parseAbsoluteExpression(RegLo))
          return false;

        // Either "]" (single-element range) or ":" must follow the first
        // bound.
        const bool isRBrace = getLexer().is(AsmToken::RBrac);
        if (!isRBrace && getLexer().isNot(AsmToken::Colon))
          return false;
        Parser.Lex();

        if (isRBrace) {
          RegHi = RegLo;
        } else {
          if (getParser().parseAbsoluteExpression(RegHi))
            return false;

          if (getLexer().isNot(AsmToken::RBrac))
            return false;
          Parser.Lex();
        }
        RegNum = (unsigned) RegLo;
        RegWidth = (RegHi - RegLo) + 1;
      }
    }
  } else if (getLexer().is(AsmToken::LBrac)) {
    // List of consecutive registers: [s0,s1,s2,s3]
    Parser.Lex();
    if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
      return false;
    if (RegWidth != 1)
      return false;
    RegisterKind RegKind1;
    unsigned Reg1, RegNum1, RegWidth1;
    do {
      if (getLexer().is(AsmToken::Comma)) {
        Parser.Lex();
      } else if (getLexer().is(AsmToken::RBrac)) {
        Parser.Lex();
        break;
      } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
        // Each list element must be a single register of the same kind and
        // consecutive with the span accumulated so far.
        if (RegWidth1 != 1) {
          return false;
        }
        if (RegKind1 != RegKind) {
          return false;
        }
        if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
          return false;
        }
      } else {
        return false;
      }
    } while (true);
  } else {
    return false;
  }
  // Validate the parsed number/width and translate it into an actual
  // register of the matching register class.
  switch (RegKind) {
  case IS_SPECIAL:
    RegNum = 0;
    RegWidth = 1;
    break;
  case IS_VGPR:
  case IS_SGPR:
  case IS_TTMP:
  {
    unsigned Size = 1;
    if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
      // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
      Size = std::min(RegWidth, 4u);
    }
    if (RegNum % Size != 0)
      return false;
    if (DwordRegIndex) { *DwordRegIndex = RegNum; }
    RegNum = RegNum / Size;
    int RCID = getRegClass(RegKind, RegWidth);
    if (RCID == -1)
      return false;
    const MCRegisterClass RC = TRI->getRegClass(RCID);
    if (RegNum >= RC.getNumRegs())
      return false;
    Reg = RC.getRegister(RegNum);
    break;
  }

  default:
    llvm_unreachable("unexpected register kind");
  }

  // Finally reject registers that do not exist on the current subtarget.
  if (!subtargetHasRegister(*TRI, Reg))
    return false;
  return true;
}
1645
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001646std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001647 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001648 SMLoc StartLoc = Tok.getLoc();
1649 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001650 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001651 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001652
Artem Tamazova01cce82016-12-27 16:00:11 +00001653 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001654 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001655 }
Artem Tamazova01cce82016-12-27 16:00:11 +00001656 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001657 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001658}
1659
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001660bool
1661AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1662 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1663 (getLexer().getKind() == AsmToken::Integer ||
1664 getLexer().getKind() == AsmToken::Real)) {
1665
1666 // This is a workaround for handling operands like these:
1667 // |1.0|
1668 // |-1|
1669 // This syntax is not compatible with syntax of standard
1670 // MC expressions (due to the trailing '|').
1671
1672 SMLoc EndLoc;
1673 const MCExpr *Expr;
1674
1675 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1676 return true;
1677 }
1678
1679 return !Expr->evaluateAsAbsolute(Val);
1680 }
1681
1682 return getParser().parseAbsoluteExpression(Val);
1683}
1684
Alex Bradbury58eba092016-11-01 16:32:05 +00001685OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001686AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001687 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001688 bool Minus = false;
1689 if (getLexer().getKind() == AsmToken::Minus) {
1690 Minus = true;
1691 Parser.Lex();
1692 }
1693
1694 SMLoc S = Parser.getTok().getLoc();
1695 switch(getLexer().getKind()) {
1696 case AsmToken::Integer: {
1697 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001698 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001699 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001700 if (Minus)
1701 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001702 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001703 return MatchOperand_Success;
1704 }
1705 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001706 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001707 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001708 return MatchOperand_ParseFail;
1709
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001710 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001711 if (Minus)
1712 F.changeSign();
1713 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001714 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001715 AMDGPUOperand::ImmTyNone, true));
1716 return MatchOperand_Success;
1717 }
1718 default:
1719 return Minus ? MatchOperand_ParseFail : MatchOperand_NoMatch;
1720 }
1721}
1722
Alex Bradbury58eba092016-11-01 16:32:05 +00001723OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00001724AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001725 if (auto R = parseRegister()) {
1726 assert(R->isReg());
1727 R->Reg.IsForcedVOP3 = isForcedVOP3();
1728 Operands.push_back(std::move(R));
1729 return MatchOperand_Success;
1730 }
Sam Kolton9772eb32017-01-11 11:46:30 +00001731 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001732}
1733
Alex Bradbury58eba092016-11-01 16:32:05 +00001734OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001735AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1736 auto res = parseImm(Operands, AbsMod);
Sam Kolton9772eb32017-01-11 11:46:30 +00001737 if (res != MatchOperand_NoMatch) {
1738 return res;
1739 }
1740
1741 return parseReg(Operands);
1742}
1743
// Parse a register or immediate with optional floating-point input modifiers:
// leading '-' (neg), neg(...), abs(...) and |...|. Combinations that would be
// ambiguous (e.g. '-' before neg(), abs() inside |...|) are rejected with a
// diagnostic. When AllowImm is false only registers are accepted.
OperandMatchResultTy
AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
                                              bool AllowImm) {
  bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;

  if (getLexer().getKind()== AsmToken::Minus) {
    const AsmToken NextToken = getLexer().peekTok();

    // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
    if (NextToken.is(AsmToken::Minus)) {
      Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
      return MatchOperand_ParseFail;
    }

    // '-' followed by an integer literal N should be interpreted as integer
    // negation rather than a floating-point NEG modifier applied to N.
    // Besides being counter-intuitive, such use of floating-point NEG modifier
    // results in different meaning of integer literals used with VOP1/2/C
    // and VOP3, for example:
    //    v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
    //    v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
    // Negative fp literals should be handled likewise for uniformity
    if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
      Parser.Lex();
      Negate = true;
    }
  }

  // neg(...) spelling of the NEG modifier; may not be mixed with leading '-'.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "neg") {
    if (Negate) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Negate2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after neg");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // abs(...) spelling of the ABS modifier.
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == "abs") {
    Parser.Lex();
    Abs2 = true;
    if (getLexer().isNot(AsmToken::LParen)) {
      Error(Parser.getTok().getLoc(), "expected left paren after abs");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
  }

  // |...| spelling of the ABS modifier; may not be nested inside abs(...).
  if (getLexer().getKind() == AsmToken::Pipe) {
    if (Abs2) {
      Error(Parser.getTok().getLoc(), "expected register or immediate");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Abs = true;
  }

  OperandMatchResultTy Res;
  if (AllowImm) {
    Res = parseRegOrImm(Operands, Abs);
  } else {
    Res = parseReg(Operands);
  }
  if (Res != MatchOperand_Success) {
    return Res;
  }

  // Consume the closing delimiters and record the modifiers actually seen.
  AMDGPUOperand::Modifiers Mods;
  if (Abs) {
    if (getLexer().getKind() != AsmToken::Pipe) {
      Error(Parser.getTok().getLoc(), "expected vertical bar");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }
  if (Abs2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Abs = true;
  }

  if (Negate) {
    Mods.Neg = true;
  } else if (Negate2) {
    if (getLexer().isNot(AsmToken::RParen)) {
      Error(Parser.getTok().getLoc(), "expected closing parentheses");
      return MatchOperand_ParseFail;
    }
    Parser.Lex();
    Mods.Neg = true;
  }

  // Only attach modifiers when at least one FP modifier was present.
  if (Mods.hasFPModifiers()) {
    AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
    Op.setModifiers(Mods);
  }
  return MatchOperand_Success;
}
1852
Alex Bradbury58eba092016-11-01 16:32:05 +00001853OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00001854AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
1855 bool AllowImm) {
Sam Kolton945231a2016-06-10 09:57:59 +00001856 bool Sext = false;
1857
Eugene Zelenko66203762017-01-21 00:53:49 +00001858 if (getLexer().getKind() == AsmToken::Identifier &&
1859 Parser.getTok().getString() == "sext") {
Sam Kolton945231a2016-06-10 09:57:59 +00001860 Parser.Lex();
1861 Sext = true;
1862 if (getLexer().isNot(AsmToken::LParen)) {
1863 Error(Parser.getTok().getLoc(), "expected left paren after sext");
1864 return MatchOperand_ParseFail;
1865 }
1866 Parser.Lex();
1867 }
1868
Sam Kolton9772eb32017-01-11 11:46:30 +00001869 OperandMatchResultTy Res;
1870 if (AllowImm) {
1871 Res = parseRegOrImm(Operands);
1872 } else {
1873 Res = parseReg(Operands);
1874 }
Sam Kolton945231a2016-06-10 09:57:59 +00001875 if (Res != MatchOperand_Success) {
1876 return Res;
1877 }
1878
Matt Arsenaultb55f6202016-12-03 18:22:49 +00001879 AMDGPUOperand::Modifiers Mods;
Sam Kolton945231a2016-06-10 09:57:59 +00001880 if (Sext) {
1881 if (getLexer().isNot(AsmToken::RParen)) {
1882 Error(Parser.getTok().getLoc(), "expected closing parentheses");
1883 return MatchOperand_ParseFail;
1884 }
1885 Parser.Lex();
1886 Mods.Sext = true;
1887 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00001888
Sam Kolton945231a2016-06-10 09:57:59 +00001889 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00001890 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00001891 Op.setModifiers(Mods);
1892 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001893
Sam Kolton945231a2016-06-10 09:57:59 +00001894 return MatchOperand_Success;
1895}
Sam Kolton1bdcef72016-05-23 09:59:02 +00001896
// Register-only variant of parseRegOrImmWithFPInputMods: immediates are
// rejected (AllowImm = false).
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
  return parseRegOrImmWithFPInputMods(Operands, false);
}
1901
// Register-only variant of parseRegOrImmWithIntInputMods: immediates are
// rejected (AllowImm = false).
OperandMatchResultTy
AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
  return parseRegOrImmWithIntInputMods(Operands, false);
}
1906
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00001907OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
1908 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
1909 if (Reg) {
1910 Operands.push_back(std::move(Reg));
1911 return MatchOperand_Success;
1912 }
1913
1914 const AsmToken &Tok = Parser.getTok();
1915 if (Tok.getString() == "off") {
1916 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
1917 AMDGPUOperand::ImmTyOff, false));
1918 Parser.Lex();
1919 return MatchOperand_Success;
1920 }
1921
1922 return MatchOperand_NoMatch;
1923}
1924
// Reject matched instructions that conflict with an encoding the user forced
// via a mnemonic suffix (_e32/_e64/_dpp/_sdwa), and steer ambiguous cases
// toward the preferred encoding.
unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
  uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;

  // The matched instruction's encoding must agree with any forced encoding.
  if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
      (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
      (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
      (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
    return Match_InvalidOperand;

  // Prefer the e32 form of instructions flagged VOPAsmPrefer32Bit unless the
  // user explicitly forced the 64-bit encoding.
  if ((TSFlags & SIInstrFlags::VOP3) &&
      (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
      getForcedEncodingSize() != 64)
    return Match_PreferE32;

  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    // v_mac_f32/16 allow only dst_sel == DWORD;
    auto OpNum =
        AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
    const auto &Op = Inst.getOperand(OpNum);
    if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
      return Match_InvalidOperand;
    }
  }

  return Match_Success;
}
1952
Matt Arsenault5f45e782017-01-09 18:44:11 +00001953// What asm variants we should check
1954ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
1955 if (getForcedEncodingSize() == 32) {
1956 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
1957 return makeArrayRef(Variants);
1958 }
1959
1960 if (isForcedVOP3()) {
1961 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
1962 return makeArrayRef(Variants);
1963 }
1964
1965 if (isForcedSDWA()) {
1966 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA};
1967 return makeArrayRef(Variants);
1968 }
1969
1970 if (isForcedDPP()) {
1971 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
1972 return makeArrayRef(Variants);
1973 }
1974
1975 static const unsigned Variants[] = {
1976 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
1977 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::DPP
1978 };
1979
1980 return makeArrayRef(Variants);
1981}
1982
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00001983unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
1984 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
1985 const unsigned Num = Desc.getNumImplicitUses();
1986 for (unsigned i = 0; i < Num; ++i) {
1987 unsigned Reg = Desc.ImplicitUses[i];
1988 switch (Reg) {
1989 case AMDGPU::FLAT_SCR:
1990 case AMDGPU::VCC:
1991 case AMDGPU::M0:
1992 return Reg;
1993 default:
1994 break;
1995 }
1996 }
1997 return AMDGPU::NoRegister;
1998}
1999
2000bool AMDGPUAsmParser::isSGPR(unsigned Reg) {
2001 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2002 const MCRegisterClass SGPRClass = TRI->getRegClass(AMDGPU::SReg_32RegClassID);
2003 const unsigned FirstSubReg = TRI->getSubReg(Reg, 1);
2004 return SGPRClass.contains(FirstSubReg != 0 ? FirstSubReg : Reg) ||
2005 Reg == AMDGPU::SCC;
2006}
2007
2008// NB: This code is correct only when used to check constant
2009// bus limitations because GFX7 support no f16 inline constants.
2010// Note that there are no cases when a GFX7 opcode violates
2011// constant bus limitations due to the use of an f16 constant.
2012bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2013 unsigned OpIdx) const {
2014 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2015
2016 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2017 return false;
2018 }
2019
2020 const MCOperand &MO = Inst.getOperand(OpIdx);
2021
2022 int64_t Val = MO.getImm();
2023 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2024
2025 switch (OpSize) { // expected operand size
2026 case 8:
2027 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2028 case 4:
2029 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2030 case 2: {
2031 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2032 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
2033 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
2034 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2035 } else {
2036 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2037 }
2038 }
2039 default:
2040 llvm_unreachable("invalid operand size");
2041 }
2042}
2043
2044bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2045 const MCOperand &MO = Inst.getOperand(OpIdx);
2046 if (MO.isImm()) {
2047 return !isInlineConstant(Inst, OpIdx);
2048 }
2049 return !MO.isReg() || isSGPR(mc2PseudoReg(MO.getReg()));
2050}
2051
// Verify that a matched VOP-family instruction reads the constant bus at
// most once (SGPRs, literals and certain implicit registers all count).
// Returns true when the instruction is within the limit.
bool AMDGPUAsmParser::validateOperandLimitations(const MCInst &Inst) {
  const unsigned Opcode = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opcode);
  unsigned ConstantBusUseCount = 0;

  // Only VOP encodings are subject to the constant bus limitation.
  if (Desc.TSFlags &
      (SIInstrFlags::VOPC |
       SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
       SIInstrFlags::VOP3 | SIInstrFlags::VOP3P)) {

    // Check special imm operands (used by madmk, etc)
    if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
      ++ConstantBusUseCount;
    }

    // Implicit reads of FLAT_SCR/VCC/M0 also occupy the bus.
    unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
    if (SGPRUsed != AMDGPU::NoRegister) {
      ++ConstantBusUseCount;
    }

    const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
    const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
    const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);

    const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };

    for (int OpIdx : OpIndices) {
      if (OpIdx == -1) break;

      const MCOperand &MO = Inst.getOperand(OpIdx);
      if (usesConstantBus(Inst, OpIdx)) {
        if (MO.isReg()) {
          const unsigned Reg = mc2PseudoReg(MO.getReg());
          // Reading the same SGPR twice counts as a single use.
          // Pairs of registers with a partial intersections like these
          //   s0, s[0:1]
          //   flat_scratch_lo, flat_scratch
          //   flat_scratch_lo, flat_scratch_hi
          // are theoretically valid but they are disabled anyway.
          // Note that this code mimics SIInstrInfo::verifyInstruction
          if (Reg != SGPRUsed) {
            ++ConstantBusUseCount;
          }
          SGPRUsed = Reg;
        } else { // Expression or a literal
          ++ConstantBusUseCount;
        }
      }
    }
  }

  return ConstantBusUseCount <= 1;
}
2104
/// Match the parsed operands against every applicable encoding variant
/// (e32/e64/DPP/SDWA as selected by getMatchedVariants) and either emit the
/// instruction or report the most specific failure seen across all variants.
bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                              OperandVector &Operands,
                                              MCStreamer &Out,
                                              uint64_t &ErrorInfo,
                                              bool MatchingInlineAsm) {
  MCInst Inst;
  unsigned Result = Match_Success;
  for (auto Variant : getMatchedVariants()) {
    uint64_t EI;
    auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
                                  Variant);
    // We order match statuses from least to most specific. We use most specific
    // status as resulting
    // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
    if ((R == Match_Success) ||
        (R == Match_PreferE32) ||
        (R == Match_MissingFeature && Result != Match_PreferE32) ||
        (R == Match_InvalidOperand && Result != Match_MissingFeature
                                   && Result != Match_PreferE32) ||
        (R == Match_MnemonicFail   && Result != Match_InvalidOperand
                                   && Result != Match_MissingFeature
                                   && Result != Match_PreferE32)) {
      Result = R;
      ErrorInfo = EI;
    }
    if (R == Match_Success)
      break;
  }

  switch (Result) {
  default: break;
  case Match_Success:
    // Even a successful match may violate the VALU constant-bus limit.
    if (!validateOperandLimitations(Inst)) {
      return Error(IDLoc,
                   "invalid operand (violates constant bus restrictions)");
    }
    Inst.setLoc(IDLoc);
    Out.EmitInstruction(Inst, getSTI());
    return false;

  case Match_MissingFeature:
    return Error(IDLoc, "instruction not supported on this GPU");

  case Match_MnemonicFail:
    return Error(IDLoc, "unrecognized instruction mnemonic");

  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ErrorInfo, when valid, indexes the offending operand.
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size()) {
        return Error(IDLoc, "too few operands for instruction");
      }
      ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        ErrorLoc = IDLoc;
    }
    return Error(ErrorLoc, "invalid operand for instruction");
  }

  case Match_PreferE32:
    return Error(IDLoc, "internal error: instruction without _e64 suffix "
                        "should be encoded as e32");
  }
  llvm_unreachable("Implement any new match types added!");
}
2170
Artem Tamazov25478d82016-12-29 15:41:52 +00002171bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2172 int64_t Tmp = -1;
2173 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2174 return true;
2175 }
2176 if (getParser().parseAbsoluteExpression(Tmp)) {
2177 return true;
2178 }
2179 Ret = static_cast<uint32_t>(Tmp);
2180 return false;
2181}
2182
/// Parse a "<major>, <minor>" version pair shared by the HSA code-object
/// directives. \returns true (with a diagnostic) on failure.
bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
                                               uint32_t &Minor) {
  if (ParseAsAbsoluteExpression(Major))
    return TokError("invalid major version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("minor version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Minor))
    return TokError("invalid minor version");

  return false;
}
2197
/// Handle ".hsa_code_object_version <major>,<minor>" by forwarding the parsed
/// pair to the target streamer.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
  uint32_t Major;
  uint32_t Minor;

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
  return false;
}
2208
/// Handle ".hsa_code_object_isa [<major>,<minor>,<stepping>,"vendor","arch"]".
/// With no arguments the ISA version is derived from the current subtarget
/// features; otherwise all five operands must be supplied explicitly.
bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
  uint32_t Major;
  uint32_t Minor;
  uint32_t Stepping;
  StringRef VendorName;
  StringRef ArchName;

  // If this directive has no arguments, then use the ISA version for the
  // targeted GPU.
  if (getLexer().is(AsmToken::EndOfStatement)) {
    AMDGPU::IsaInfo::IsaVersion ISA =
        AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
    getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
                                                      ISA.Stepping,
                                                      "AMD", "AMDGPU");
    return false;
  }

  if (ParseDirectiveMajorMinor(Major, Minor))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("stepping version number required, comma expected");
  Lex();

  if (ParseAsAbsoluteExpression(Stepping))
    return TokError("invalid stepping version");

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("vendor name required, comma expected");
  Lex();

  // Vendor and arch names must be quoted strings.
  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid vendor name");

  VendorName = getLexer().getTok().getStringContents();
  Lex();

  if (getLexer().isNot(AsmToken::Comma))
    return TokError("arch name required, comma expected");
  Lex();

  if (getLexer().isNot(AsmToken::String))
    return TokError("invalid arch name");

  ArchName = getLexer().getTok().getStringContents();
  Lex();

  getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
                                                    VendorName, ArchName);
  return false;
}
2261
/// Handle ".amdgpu_runtime_metadata": capture all raw text verbatim up to the
/// matching ".end_amdgpu_runtime_metadata" and hand it to the target streamer.
bool AMDGPUAsmParser::ParseDirectiveRuntimeMetadata() {
  std::string Metadata;
  raw_string_ostream MS(Metadata);

  // Whitespace is significant inside the metadata blob, so keep space tokens.
  getLexer().setSkipSpace(false);

  bool FoundEnd = false;
  while (!getLexer().is(AsmToken::Eof)) {
    // Preserve leading spaces on each line in the captured text.
    while (getLexer().is(AsmToken::Space)) {
      MS << ' ';
      Lex();
    }

    if (getLexer().is(AsmToken::Identifier)) {
      StringRef ID = getLexer().getTok().getIdentifier();
      if (ID == ".end_amdgpu_runtime_metadata") {
        Lex();
        FoundEnd = true;
        break;
      }
    }

    // Copy the remainder of the statement, joined by the target's
    // statement separator so the blob round-trips as valid text.
    MS << Parser.parseStringToEndOfStatement()
       << getContext().getAsmInfo()->getSeparatorString();

    Parser.eatToEndOfStatement();
  }

  // Restore normal lexing before reporting any error.
  getLexer().setSkipSpace(true);

  if (getLexer().is(AsmToken::Eof) && !FoundEnd)
    return TokError("expected directive .end_amdgpu_runtime_metadata not found");

  MS.flush();

  if (getTargetStreamer().EmitRuntimeMetadata(getFeatureBits(), Metadata))
    return Error(getParser().getTok().getLoc(), "invalid runtime metadata");

  return false;
}
2302
Tom Stellardff7416b2015-06-26 21:58:31 +00002303bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
2304 amd_kernel_code_t &Header) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002305 SmallString<40> ErrStr;
2306 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00002307 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00002308 return TokError(Err.str());
2309 }
Tom Stellardff7416b2015-06-26 21:58:31 +00002310 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00002311 return false;
2312}
2313
/// Handle ".amd_kernel_code_t": start from the subtarget's default header,
/// then overwrite individual fields until ".end_amd_kernel_code_t".
bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
  amd_kernel_code_t Header;
  AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());

  while (true) {
    // Lex EndOfStatement.  This is in a while loop, because lexing a comment
    // will set the current token to EndOfStatement.
    while(getLexer().is(AsmToken::EndOfStatement))
      Lex();

    if (getLexer().isNot(AsmToken::Identifier))
      return TokError("expected value identifier or .end_amd_kernel_code_t");

    StringRef ID = getLexer().getTok().getIdentifier();
    Lex();

    if (ID == ".end_amd_kernel_code_t")
      break;

    // Any other identifier is treated as a header field name.
    if (ParseAMDKernelCodeTValue(ID, Header))
      return true;
  }

  getTargetStreamer().EmitAMDKernelCodeT(Header);

  return false;
}
2341
Tom Stellarde135ffd2015-09-25 21:41:28 +00002342bool AMDGPUAsmParser::ParseSectionDirectiveHSAText() {
2343 getParser().getStreamer().SwitchSection(
2344 AMDGPU::getHSATextSection(getContext()));
2345 return false;
2346}
2347
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002348bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
2349 if (getLexer().isNot(AsmToken::Identifier))
2350 return TokError("expected symbol name");
2351
2352 StringRef KernelName = Parser.getTok().getString();
2353
2354 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
2355 ELF::STT_AMDGPU_HSA_KERNEL);
2356 Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00002357 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00002358 return false;
2359}
2360
Tom Stellard00f2f912015-12-02 19:47:57 +00002361bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaModuleGlobal() {
2362 if (getLexer().isNot(AsmToken::Identifier))
2363 return TokError("expected symbol name");
2364
2365 StringRef GlobalName = Parser.getTok().getIdentifier();
2366
2367 getTargetStreamer().EmitAMDGPUHsaModuleScopeGlobal(GlobalName);
2368 Lex();
2369 return false;
2370}
2371
2372bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaProgramGlobal() {
2373 if (getLexer().isNot(AsmToken::Identifier))
2374 return TokError("expected symbol name");
2375
2376 StringRef GlobalName = Parser.getTok().getIdentifier();
2377
2378 getTargetStreamer().EmitAMDGPUHsaProgramScopeGlobal(GlobalName);
2379 Lex();
2380 return false;
2381}
2382
2383bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalAgent() {
2384 getParser().getStreamer().SwitchSection(
2385 AMDGPU::getHSADataGlobalAgentSection(getContext()));
2386 return false;
2387}
2388
2389bool AMDGPUAsmParser::ParseSectionDirectiveHSADataGlobalProgram() {
2390 getParser().getStreamer().SwitchSection(
2391 AMDGPU::getHSADataGlobalProgramSection(getContext()));
2392 return false;
2393}
2394
Tom Stellard9760f032015-12-03 03:34:32 +00002395bool AMDGPUAsmParser::ParseSectionDirectiveHSARodataReadonlyAgent() {
2396 getParser().getStreamer().SwitchSection(
2397 AMDGPU::getHSARodataReadonlyAgentSection(getContext()));
2398 return false;
2399}
2400
/// Top-level directive dispatcher.  \returns false when the directive was
/// recognized and handled here; true lets the generic parser try it.
bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getString();

  if (IDVal == ".hsa_code_object_version")
    return ParseDirectiveHSACodeObjectVersion();

  if (IDVal == ".hsa_code_object_isa")
    return ParseDirectiveHSACodeObjectISA();

  if (IDVal == ".amdgpu_runtime_metadata")
    return ParseDirectiveRuntimeMetadata();

  if (IDVal == ".amd_kernel_code_t")
    return ParseDirectiveAMDKernelCodeT();

  if (IDVal == ".hsatext")
    return ParseSectionDirectiveHSAText();

  if (IDVal == ".amdgpu_hsa_kernel")
    return ParseDirectiveAMDGPUHsaKernel();

  if (IDVal == ".amdgpu_hsa_module_global")
    return ParseDirectiveAMDGPUHsaModuleGlobal();

  if (IDVal == ".amdgpu_hsa_program_global")
    return ParseDirectiveAMDGPUHsaProgramGlobal();

  if (IDVal == ".hsadata_global_agent")
    return ParseSectionDirectiveHSADataGlobalAgent();

  if (IDVal == ".hsadata_global_program")
    return ParseSectionDirectiveHSADataGlobalProgram();

  if (IDVal == ".hsarodata_readonly_agent")
    return ParseSectionDirectiveHSARodataReadonlyAgent();

  // Not an AMDGPU-specific directive.
  return true;
}
2439
Matt Arsenault68802d32015-11-05 03:11:27 +00002440bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
2441 unsigned RegNo) const {
Matt Arsenault3b159672015-12-01 20:31:08 +00002442 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00002443 return true;
2444
Matt Arsenault3b159672015-12-01 20:31:08 +00002445 if (isSI()) {
2446 // No flat_scr
2447 switch (RegNo) {
2448 case AMDGPU::FLAT_SCR:
2449 case AMDGPU::FLAT_SCR_LO:
2450 case AMDGPU::FLAT_SCR_HI:
2451 return false;
2452 default:
2453 return true;
2454 }
2455 }
2456
Matt Arsenault68802d32015-11-05 03:11:27 +00002457 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
2458 // SI/CI have.
2459 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
2460 R.isValid(); ++R) {
2461 if (*R == RegNo)
2462 return false;
2463 }
2464
2465 return true;
2466}
2467
/// Parse a single instruction operand: first via the tablegen'd custom
/// parsers, then as a register or immediate, and finally as an expression
/// or bare token.
OperandMatchResultTy
AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
  // Try to parse with a custom parser
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);

  // If we successfully parsed the operand or if there as an error parsing,
  // we are done.
  //
  // If we are parsing after we reach EndOfStatement then this means we
  // are appending default values to the Operands list.  This is only done
  // by custom parser, so we shouldn't continue on to the generic parsing.
  if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
      getLexer().is(AsmToken::EndOfStatement))
    return ResTy;

  ResTy = parseRegOrImm(Operands);

  if (ResTy == MatchOperand_Success)
    return ResTy;

  if (getLexer().getKind() == AsmToken::Identifier) {
    // If this identifier is a symbol, we want to create an expression for it.
    // It is a little difficult to distinguish between a symbol name, and
    // an instruction flag like 'gds'.  In order to do this, we parse
    // all tokens as expressions and then treate the symbol name as the token
    // string when we want to interpret the operand as a token.
    const auto &Tok = Parser.getTok();
    SMLoc S = Tok.getLoc();
    const MCExpr *Expr = nullptr;
    if (!Parser.parseExpression(Expr)) {
      Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
      return MatchOperand_Success;
    }

    // Expression parsing failed: fall back to treating the identifier as a
    // plain token operand (e.g. an instruction modifier keyword).
    Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), Tok.getLoc()));
    Parser.Lex();
    return MatchOperand_Success;
  }
  return MatchOperand_NoMatch;
}
2508
Sam Kolton05ef1c92016-06-03 10:27:37 +00002509StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
2510 // Clear any forced encodings from the previous instruction.
2511 setForcedEncodingSize(0);
2512 setForcedDPP(false);
2513 setForcedSDWA(false);
2514
2515 if (Name.endswith("_e64")) {
2516 setForcedEncodingSize(64);
2517 return Name.substr(0, Name.size() - 4);
2518 } else if (Name.endswith("_e32")) {
2519 setForcedEncodingSize(32);
2520 return Name.substr(0, Name.size() - 4);
2521 } else if (Name.endswith("_dpp")) {
2522 setForcedDPP(true);
2523 return Name.substr(0, Name.size() - 4);
2524 } else if (Name.endswith("_sdwa")) {
2525 setForcedSDWA(true);
2526 return Name.substr(0, Name.size() - 5);
2527 }
2528 return Name;
2529}
2530
/// Parse one instruction: push the (suffix-stripped) mnemonic as a token
/// operand, then parse operands until end of statement, recovering to the
/// end of the line on any operand error.
bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
                                       StringRef Name,
                                       SMLoc NameLoc, OperandVector &Operands) {
  // Add the instruction mnemonic
  Name = parseMnemonicSuffix(Name);
  Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));

  while (!getLexer().is(AsmToken::EndOfStatement)) {
    OperandMatchResultTy Res = parseOperand(Operands, Name);

    // Eat the comma or space if there is one.
    if (getLexer().is(AsmToken::Comma))
      Parser.Lex();

    switch (Res) {
    case MatchOperand_Success: break;
    case MatchOperand_ParseFail:
      Error(getLexer().getLoc(), "failed parsing operand.");
      // Skip the rest of the statement so the next line parses cleanly.
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    case MatchOperand_NoMatch:
      Error(getLexer().getLoc(), "not a valid operand.");
      while (!getLexer().is(AsmToken::EndOfStatement)) {
        Parser.Lex();
      }
      return true;
    }
  }

  return false;
}
2564
2565//===----------------------------------------------------------------------===//
2566// Utility functions
2567//===----------------------------------------------------------------------===//
2568
Alex Bradbury58eba092016-11-01 16:32:05 +00002569OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00002570AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002571 switch(getLexer().getKind()) {
2572 default: return MatchOperand_NoMatch;
2573 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002574 StringRef Name = Parser.getTok().getString();
2575 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002576 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002577 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00002578
2579 Parser.Lex();
2580 if (getLexer().isNot(AsmToken::Colon))
2581 return MatchOperand_ParseFail;
2582
2583 Parser.Lex();
2584 if (getLexer().isNot(AsmToken::Integer))
2585 return MatchOperand_ParseFail;
2586
2587 if (getParser().parseAbsoluteExpression(Int))
2588 return MatchOperand_ParseFail;
2589 break;
2590 }
2591 }
2592 return MatchOperand_Success;
2593}
2594
Alex Bradbury58eba092016-11-01 16:32:05 +00002595OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002596AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002597 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002598 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002599 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002600 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002601
Alex Bradbury58eba092016-11-01 16:32:05 +00002602 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002603 if (Res != MatchOperand_Success)
2604 return Res;
2605
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002606 if (ConvertResult && !ConvertResult(Value)) {
2607 return MatchOperand_ParseFail;
2608 }
2609
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002610 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002611 return MatchOperand_Success;
2612}
2613
/// Parse "<Prefix>:[b0,b1,b2]" where each element is 0 or 1, packing the
/// bits little-endian (element I into bit I) into one immediate operand.
OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
  const char *Prefix,
  OperandVector &Operands,
  AMDGPUOperand::ImmTy ImmTy,
  bool (*ConvertResult)(int64_t&)) {
  StringRef Name = Parser.getTok().getString();
  if (!Name.equals(Prefix))
    return MatchOperand_NoMatch;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Colon))
    return MatchOperand_ParseFail;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LBrac))
    return MatchOperand_ParseFail;
  Parser.Lex();

  unsigned Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  // FIXME: How to verify the number of elements matches the number of src
  // operands?
  for (int I = 0; I < 3; ++I) {
    if (I != 0) {
      // A ']' after at least one element ends the list early.
      if (getLexer().is(AsmToken::RBrac))
        break;

      if (getLexer().isNot(AsmToken::Comma))
        return MatchOperand_ParseFail;
      Parser.Lex();
    }

    if (getLexer().isNot(AsmToken::Integer))
      return MatchOperand_ParseFail;

    int64_t Op;
    if (getParser().parseAbsoluteExpression(Op))
      return MatchOperand_ParseFail;

    // Only single-bit values are accepted.
    if (Op != 0 && Op != 1)
      return MatchOperand_ParseFail;
    Val |= (Op << I);
  }

  // NOTE(review): this consumes the closing ']' without checking it is one;
  // after a full three elements a stray token would be silently eaten --
  // confirm whether an explicit RBrac check is intended upstream.
  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
  return MatchOperand_Success;
}
2663
Alex Bradbury58eba092016-11-01 16:32:05 +00002664OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00002665AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002666 AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002667 int64_t Bit = 0;
2668 SMLoc S = Parser.getTok().getLoc();
2669
2670 // We are at the end of the statement, and this is a default argument, so
2671 // use a default value.
2672 if (getLexer().isNot(AsmToken::EndOfStatement)) {
2673 switch(getLexer().getKind()) {
2674 case AsmToken::Identifier: {
2675 StringRef Tok = Parser.getTok().getString();
2676 if (Tok == Name) {
2677 Bit = 1;
2678 Parser.Lex();
2679 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
2680 Bit = 0;
2681 Parser.Lex();
2682 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00002683 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002684 }
2685 break;
2686 }
2687 default:
2688 return MatchOperand_NoMatch;
2689 }
2690 }
2691
Sam Kolton1eeb11b2016-09-09 14:44:04 +00002692 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00002693 return MatchOperand_Success;
2694}
2695
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00002696static void addOptionalImmOperand(
2697 MCInst& Inst, const OperandVector& Operands,
2698 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
2699 AMDGPUOperand::ImmTy ImmT,
2700 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002701 auto i = OptionalIdx.find(ImmT);
2702 if (i != OptionalIdx.end()) {
2703 unsigned Idx = i->second;
2704 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
2705 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00002706 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002707 }
2708}
2709
Alex Bradbury58eba092016-11-01 16:32:05 +00002710OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00002711AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00002712 if (getLexer().isNot(AsmToken::Identifier)) {
2713 return MatchOperand_NoMatch;
2714 }
2715 StringRef Tok = Parser.getTok().getString();
2716 if (Tok != Prefix) {
2717 return MatchOperand_NoMatch;
2718 }
2719
2720 Parser.Lex();
2721 if (getLexer().isNot(AsmToken::Colon)) {
2722 return MatchOperand_ParseFail;
2723 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002724
Sam Kolton3025e7f2016-04-26 13:33:56 +00002725 Parser.Lex();
2726 if (getLexer().isNot(AsmToken::Identifier)) {
2727 return MatchOperand_ParseFail;
2728 }
2729
2730 Value = Parser.getTok().getString();
2731 return MatchOperand_Success;
2732}
2733
Tom Stellard45bb48e2015-06-13 03:28:10 +00002734//===----------------------------------------------------------------------===//
2735// ds
2736//===----------------------------------------------------------------------===//
2737
Tom Stellard45bb48e2015-06-13 03:28:10 +00002738void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
2739 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002740 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002741
2742 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2743 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2744
2745 // Add the register arguments
2746 if (Op.isReg()) {
2747 Op.addRegOperands(Inst, 1);
2748 continue;
2749 }
2750
2751 // Handle optional arguments
2752 OptionalIdx[Op.getImmTy()] = i;
2753 }
2754
Nikolay Haustov4f672a32016-04-29 09:02:30 +00002755 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
2756 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002757 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002758
Tom Stellard45bb48e2015-06-13 03:28:10 +00002759 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2760}
2761
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00002762void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
2763 bool IsGdsHardcoded) {
2764 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002765
2766 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
2767 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
2768
2769 // Add the register arguments
2770 if (Op.isReg()) {
2771 Op.addRegOperands(Inst, 1);
2772 continue;
2773 }
2774
2775 if (Op.isToken() && Op.getToken() == "gds") {
Artem Tamazov43b61562017-02-03 12:47:30 +00002776 IsGdsHardcoded = true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002777 continue;
2778 }
2779
2780 // Handle optional arguments
2781 OptionalIdx[Op.getImmTy()] = i;
2782 }
2783
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002784 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
Artem Tamazov43b61562017-02-03 12:47:30 +00002785 if (!IsGdsHardcoded) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00002786 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00002787 }
2788 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
2789}
2790
/// Build an MCInst for an EXP instruction: collect the sources (counting a
/// bit in the "en" mask per written source, with "off" slots contributing
/// to position but not to the mask), then back-fill vm/compr and append
/// the computed enable mask.
void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned EnMask = 0;
  int SrcIdx = 0;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      // Each real source register enables its lane in the en mask.
      EnMask |= (1 << SrcIdx);
      Op.addRegOperands(Inst, 1);
      ++SrcIdx;
      continue;
    }

    if (Op.isOff()) {
      // "off" occupies a source slot but leaves its en bit clear.
      ++SrcIdx;
      Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
      continue;
    }

    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // "done" is encoded in the opcode itself, not as an operand.
    if (Op.isToken() && Op.getToken() == "done")
      continue;

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);

  Inst.addOperand(MCOperand::createImm(EnMask));
}
Tom Stellard45bb48e2015-06-13 03:28:10 +00002831
2832//===----------------------------------------------------------------------===//
2833// s_waitcnt
2834//===----------------------------------------------------------------------===//
2835
/// Parse one "<name>(<count>)" clause of an s_waitcnt operand (vmcnt,
/// expcnt or lgkmcnt) and fold it into the combined \p IntVal encoding.
/// \returns true on any syntax error or unknown counter name.
bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
  StringRef CntName = Parser.getTok().getString();
  int64_t CntVal;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::LParen))
    return true;

  Parser.Lex();
  if (getLexer().isNot(AsmToken::Integer))
    return true;

  if (getParser().parseAbsoluteExpression(CntVal))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;

  Parser.Lex();
  // Clauses may be joined by '&' or ',' -- both are optional separators.
  if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma))
    Parser.Lex();

  // Field widths/positions depend on the target's ISA version.
  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
  if (CntName == "vmcnt")
    IntVal = encodeVmcnt(ISA, IntVal, CntVal);
  else if (CntName == "expcnt")
    IntVal = encodeExpcnt(ISA, IntVal, CntVal);
  else if (CntName == "lgkmcnt")
    IntVal = encodeLgkmcnt(ISA, IntVal, CntVal);
  else
    return true;

  return false;
}
2871
/// Parse the operand of s_waitcnt: either a raw integer or a sequence of
/// named counter clauses (e.g. "vmcnt(0) & lgkmcnt(1)").  Starts from the
/// all-counters-idle bitmask so unmentioned counters stay unconstrained.
OperandMatchResultTy
AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
  AMDGPU::IsaInfo::IsaVersion ISA =
      AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
  int64_t Waitcnt = getWaitcntBitMask(ISA);
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Waitcnt))
      return MatchOperand_ParseFail;
    break;

  case AsmToken::Identifier:
    // One or more named clauses, each folded into the running encoding.
    do {
      if (parseCnt(Waitcnt))
        return MatchOperand_ParseFail;
    } while(getLexer().isNot(AsmToken::EndOfStatement));
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
  return MatchOperand_Success;
}
2897
/// Parse the "hwreg(<id> [, <offset>, <width>])" construct used by
/// s_getreg/s_setreg.  The register id may be a symbolic name (looked up in
/// IdSymbolic) or a numeric expression; offset and width are optional but
/// must appear together.  \returns true on syntax error.
bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
                                          int64_t &Width) {
  using namespace llvm::AMDGPU::Hwreg;

  if (Parser.getTok().getString() != "hwreg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    // Symbolic register name; an unknown name is recorded as ID_UNKNOWN_
    // so the caller can diagnose it.
    HwReg.IsSymbolic = true;
    HwReg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_SYMBOLIC_FIRST_; i < ID_SYMBOLIC_LAST_; ++i) {
      if (tok == IdSymbolic[i]) {
        HwReg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    // Numeric register id.
    HwReg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(HwReg.Id))
      return true;
  }

  // Short form: hwreg(<id>) with no offset/width.
  if (getLexer().is(AsmToken::RParen)) {
    Parser.Lex();
    return false;
  }

  // optional params
  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Offset))
    return true;

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::Integer))
    return true;
  if (getParser().parseAbsoluteExpression(Width))
    return true;

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();

  return false;
}
2959
/// Parse the hwreg operand of s_getreg/s_setreg: either a raw 16-bit
/// immediate or a hwreg(...) construct, which is validated and packed into
/// the id/offset/width-1 bitfields of the immediate.
OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
  using namespace llvm::AMDGPU::Hwreg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default: return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;

  case AsmToken::Identifier: {
    OperandInfoTy HwReg(ID_UNKNOWN_);
    int64_t Offset = OFFSET_DEFAULT_;
    int64_t Width = WIDTH_M1_DEFAULT_ + 1;
    if (parseHwregConstruct(HwReg, Offset, Width))
      return MatchOperand_ParseFail;
    // Range-check each field; as above, emit diagnostics but still build the
    // operand so parsing can continue.
    if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
      if (HwReg.IsSymbolic)
        Error(S, "invalid symbolic name of hardware register");
      else
        Error(S, "invalid code of hardware register: only 6-bit values are legal");
    }
    if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
      Error(S, "invalid bit offset: only 5-bit values are legal");
    if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
      Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
    // Pack fields; hardware encodes width biased by one.
    Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
  return MatchOperand_Success;
}
3002
// An s_waitcnt operand is any plain immediate; the bit layout was already
// produced by parseSWaitCntOps/parseCnt.
bool AMDGPUOperand::isSWaitCnt() const {
  return isImm();
}

// A hwreg operand is an immediate specifically tagged by parseHwreg.
bool AMDGPUOperand::isHwreg() const {
  return isImmTy(ImmTyHwreg);
}
3010
Artem Tamazov6edc1352016-05-26 17:00:33 +00003011bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
Artem Tamazovebe71ce2016-05-06 17:48:48 +00003012 using namespace llvm::AMDGPU::SendMsg;
3013
3014 if (Parser.getTok().getString() != "sendmsg")
3015 return true;
3016 Parser.Lex();
3017
3018 if (getLexer().isNot(AsmToken::LParen))
3019 return true;
3020 Parser.Lex();
3021
3022 if (getLexer().is(AsmToken::Identifier)) {
3023 Msg.IsSymbolic = true;
3024 Msg.Id = ID_UNKNOWN_;
3025 const std::string tok = Parser.getTok().getString();
3026 for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
3027 switch(i) {
3028 default: continue; // Omit gaps.
3029 case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
3030 }
3031 if (tok == IdSymbolic[i]) {
3032 Msg.Id = i;
3033 break;
3034 }
3035 }
3036 Parser.Lex();
3037 } else {
3038 Msg.IsSymbolic = false;
3039 if (getLexer().isNot(AsmToken::Integer))
3040 return true;
3041 if (getParser().parseAbsoluteExpression(Msg.Id))
3042 return true;
3043 if (getLexer().is(AsmToken::Integer))
3044 if (getParser().parseAbsoluteExpression(Msg.Id))
3045 Msg.Id = ID_UNKNOWN_;
3046 }
3047 if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
3048 return false;
3049
3050 if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
3051 if (getLexer().isNot(AsmToken::RParen))
3052 return true;
3053 Parser.Lex();
3054 return false;
3055 }
3056
3057 if (getLexer().isNot(AsmToken::Comma))
3058 return true;
3059 Parser.Lex();
3060
3061 assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
3062 Operation.Id = ID_UNKNOWN_;
3063 if (getLexer().is(AsmToken::Identifier)) {
3064 Operation.IsSymbolic = true;
3065 const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
3066 const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
3067 const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
Artem Tamazov6edc1352016-05-26 17:00:33 +00003068 const StringRef Tok = Parser.getTok().getString();
Artem Tamazovebe71ce2016-05-06 17:48:48 +00003069 for (int i = F; i < L; ++i) {
3070 if (Tok == S[i]) {
3071 Operation.Id = i;
3072 break;
3073 }
3074 }
3075 Parser.Lex();
3076 } else {
3077 Operation.IsSymbolic = false;
3078 if (getLexer().isNot(AsmToken::Integer))
3079 return true;
3080 if (getParser().parseAbsoluteExpression(Operation.Id))
3081 return true;
3082 }
3083
3084 if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
3085 // Stream id is optional.
3086 if (getLexer().is(AsmToken::RParen)) {
3087 Parser.Lex();
3088 return false;
3089 }
3090
3091 if (getLexer().isNot(AsmToken::Comma))
3092 return true;
3093 Parser.Lex();
3094
3095 if (getLexer().isNot(AsmToken::Integer))
3096 return true;
3097 if (getParser().parseAbsoluteExpression(StreamId))
3098 return true;
3099 }
3100
3101 if (getLexer().isNot(AsmToken::RParen))
3102 return true;
3103 Parser.Lex();
3104 return false;
3105}
3106
Matt Arsenault0e8a2992016-12-15 20:40:20 +00003107OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
3108 if (getLexer().getKind() != AsmToken::Identifier)
3109 return MatchOperand_NoMatch;
3110
3111 StringRef Str = Parser.getTok().getString();
3112 int Slot = StringSwitch<int>(Str)
3113 .Case("p10", 0)
3114 .Case("p20", 1)
3115 .Case("p0", 2)
3116 .Default(-1);
3117
3118 SMLoc S = Parser.getTok().getLoc();
3119 if (Slot == -1)
3120 return MatchOperand_ParseFail;
3121
3122 Parser.Lex();
3123 Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
3124 AMDGPUOperand::ImmTyInterpSlot));
3125 return MatchOperand_Success;
3126}
3127
/// Parse a v_interp_* attribute operand of the form "attr<N>.<chan>", e.g.
/// "attr32.y".  Emits two immediate operands: the attribute number and the
/// channel (x/y/z/w -> 0..3).
OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  // The last two characters are the ".<chan>" suffix.
  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  // Strip ".<chan>" from the back and "attr" from the front, leaving the
  // decimal attribute number.
  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    // Diagnose but still report success so parsing can continue.
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}
3167
// Emit the common diagnostic for an out-of-range/unknown exp target, anchored
// at the current token.
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}
3171
3172OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
3173 uint8_t &Val) {
3174 if (Str == "null") {
3175 Val = 9;
3176 return MatchOperand_Success;
3177 }
3178
3179 if (Str.startswith("mrt")) {
3180 Str = Str.drop_front(3);
3181 if (Str == "z") { // == mrtz
3182 Val = 8;
3183 return MatchOperand_Success;
3184 }
3185
3186 if (Str.getAsInteger(10, Val))
3187 return MatchOperand_ParseFail;
3188
3189 if (Val > 7)
3190 errorExpTgt();
3191
3192 return MatchOperand_Success;
3193 }
3194
3195 if (Str.startswith("pos")) {
3196 Str = Str.drop_front(3);
3197 if (Str.getAsInteger(10, Val))
3198 return MatchOperand_ParseFail;
3199
3200 if (Val > 3)
3201 errorExpTgt();
3202
3203 Val += 12;
3204 return MatchOperand_Success;
3205 }
3206
3207 if (Str.startswith("param")) {
3208 Str = Str.drop_front(5);
3209 if (Str.getAsInteger(10, Val))
3210 return MatchOperand_ParseFail;
3211
3212 if (Val >= 32)
3213 errorExpTgt();
3214
3215 Val += 32;
3216 return MatchOperand_Success;
3217 }
3218
3219 if (Str.startswith("invalid_target_")) {
3220 Str = Str.drop_front(15);
3221 if (Str.getAsInteger(10, Val))
3222 return MatchOperand_ParseFail;
3223
3224 errorExpTgt();
3225 return MatchOperand_Success;
3226 }
3227
3228 return MatchOperand_NoMatch;
3229}
3230
3231OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
3232 uint8_t Val;
3233 StringRef Str = Parser.getTok().getString();
3234
3235 auto Res = parseExpTgtImpl(Str, Val);
3236 if (Res != MatchOperand_Success)
3237 return Res;
3238
3239 SMLoc S = Parser.getTok().getLoc();
3240 Parser.Lex();
3241
3242 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
3243 AMDGPUOperand::ImmTyExpTgt));
3244 return MatchOperand_Success;
3245}
3246
/// Parse the s_sendmsg operand: either a raw 16-bit immediate or a
/// sendmsg(...) construct, which is validated and packed into the
/// message-id/operation/stream-id bitfields of the immediate.
OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unneccessary error messages.
    }
    break;
  case AsmToken::Identifier: {
    OperandInfoTy Msg(ID_UNKNOWN_);
    OperandInfoTy Operation(OP_UNKNOWN_);
    int64_t StreamId = STREAM_ID_DEFAULT_;
    if (parseSendMsgConstruct(Msg, Operation, StreamId))
      return MatchOperand_ParseFail;
    // do/while(false) so validation failures can 'break' out early while
    // still falling through to build the operand below.
    do {
      // Validate and encode message ID.
      if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
              || Msg.Id == ID_SYSMSG)) {
        if (Msg.IsSymbolic)
          Error(S, "invalid/unsupported symbolic name of message");
        else
          Error(S, "invalid/unsupported code of message");
        break;
      }
      Imm16Val = (Msg.Id << ID_SHIFT_);
      // Validate and encode operation ID.
      if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
        if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid symbolic name of GS_OP");
          else
            Error(S, "invalid code of GS_OP: only 2-bit values are legal");
          break;
        }
        if (Operation.Id == OP_GS_NOP
            && Msg.Id != ID_GS_DONE) {
          Error(S, "invalid GS_OP: NOP is for GS_DONE only");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      if (Msg.Id == ID_SYSMSG) {
        if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
          if (Operation.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
          else
            Error(S, "invalid/unsupported code of SYSMSG_OP");
          break;
        }
        Imm16Val |= (Operation.Id << OP_SHIFT_);
      }
      // Validate and encode stream ID.
      if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
        if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
          Error(S, "invalid stream id: only 2-bit values are legal");
          break;
        }
        Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
      }
    } while (false);
  }
  break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}
3325
// A sendmsg operand is an immediate specifically tagged by parseSendMsgOp.
bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}
3329
Tom Stellard45bb48e2015-06-13 03:28:10 +00003330//===----------------------------------------------------------------------===//
3331// sopp branch targets
3332//===----------------------------------------------------------------------===//
3333
/// Parse a SOPP branch target: either an absolute integer offset or a label,
/// which becomes a symbol-reference expression resolved at emission time.
OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
  default: return MatchOperand_ParseFail;
  case AsmToken::Integer: {
    int64_t Imm;
    if (getParser().parseAbsoluteExpression(Imm))
      return MatchOperand_ParseFail;
    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
    return MatchOperand_Success;
  }

  case AsmToken::Identifier:
    // Create (or reuse) a symbol for the label and wrap it in an expression
    // operand; the fixup is applied later.
    Operands.push_back(AMDGPUOperand::CreateExpr(this,
        MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
                Parser.getTok().getString()), getContext()), S));
    Parser.Lex();
    return MatchOperand_Success;
  }
}
3356
3357//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003358// mubuf
3359//===----------------------------------------------------------------------===//
3360
// Default (absent) MUBUF cache-policy modifiers: each is a zero immediate of
// the corresponding type.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultTFE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyTFE);
}
3372
/// Convert parsed MUBUF operands into an MCInst: registers and the soffset
/// immediate are appended in order, optional modifiers are collected by type
/// and appended afterwards in the fixed offset/glc/slc/tfe order.  For
/// atomic-with-return instructions the first register is duplicated, since
/// $vdata_in and $vdata share one written operand.
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                            const OperandVector &Operands,
                            bool IsAtomic, bool IsAtomicReturn) {
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}
3418
3419//===----------------------------------------------------------------------===//
3420// mimg
3421//===----------------------------------------------------------------------===//
3422
/// Convert parsed MIMG operands into an MCInst: defs first, then register/
/// immediate sources in order; optional modifiers are collected by type and
/// appended in the fixed dmask/unorm/glc/da/r128/tfe/lwe/slc order.
void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
3455
/// Same as cvtMIMG, but for MIMG atomics: the destination register is also
/// re-added as the data source, since the instruction reads and writes vdata.
void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // Add src, same as dst
  ((AMDGPUOperand &)*Operands[I]).addRegOperands(Inst, 1);

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isRegOrImm()) {
      Op.addRegOrImmOperands(Inst, 1);
      continue;
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
}
3491
// Default (absent) MIMG modifiers: each is a zero immediate of the
// corresponding type.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDMask() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultUNorm() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyUNorm);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultDA() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDA);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultR128() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyR128);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultLWE() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyLWE);
}
3511
Tom Stellard45bb48e2015-06-13 03:28:10 +00003512//===----------------------------------------------------------------------===//
Tom Stellard217361c2015-08-06 19:28:38 +00003513// smrd
3514//===----------------------------------------------------------------------===//
3515
// SMRD offset fits the 8-bit immediate encoding (SI/VI byte/dword forms).
bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}

// SMRD offset fits the 20-bit immediate encoding.
bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}
3529
// Default (absent) SMRD offsets: a zero immediate of type ImmTyOffset for
// each encoding variant.
AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}
3541
Tom Stellard217361c2015-08-06 19:28:38 +00003542//===----------------------------------------------------------------------===//
Tom Stellard45bb48e2015-06-13 03:28:10 +00003543// vop3
3544//===----------------------------------------------------------------------===//
3545
// Convert an omod "mul:" factor (1, 2 or 4) into its 2-bit field encoding
// (0, 1 or 2); any other value is rejected.
static bool ConvertOmodMul(int64_t &Mul) {
  switch (Mul) {
  case 1:
  case 2:
  case 4:
    Mul >>= 1; // 1 -> 0, 2 -> 1, 4 -> 2
    return true;
  default:
    return false;
  }
}
3553
// Convert an omod "div:" factor (1 or 2) into its 2-bit field encoding
// (0 or 3); any other value is rejected.
static bool ConvertOmodDiv(int64_t &Div) {
  switch (Div) {
  case 1:
    Div = 0;
    return true;
  case 2:
    Div = 3;
    return true;
  default:
    return false;
  }
}
3567
// Convert a DPP bound_ctrl source value into its encoded bit: 0 -> 1 and
// -1 -> 0; any other value is rejected.
static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  switch (BoundCtrl) {
  case 0:
    BoundCtrl = 1;
    return true;
  case -1:
    BoundCtrl = 0;
    return true;
  default:
    return false;
  }
}
3581
// Note: the order in this table matches the order of operands in AsmString.
// Each entry: operand name, immediate type, whether it is a bare flag
// ("IsBit": present/absent with no value), and an optional value converter.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  // Buffer / DS addressing and cache-policy modifiers.
  {"offen", AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen", AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64", AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds", AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"offset", AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"glc", AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc", AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe", AMDGPUOperand::ImmTyTFE, true, nullptr},
  // VOP3 output modifiers.
  {"clamp", AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod", AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  // MIMG modifiers.
  {"unorm", AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da", AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128", AMDGPUOperand::ImmTyR128, true, nullptr},
  {"lwe", AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"dmask", AMDGPUOperand::ImmTyDMask, false, nullptr},
  // DPP controls.
  {"row_mask", AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask", AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  // SDWA selectors.
  {"dst_sel", AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel", AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel", AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"vm", AMDGPUOperand::ImmTyExpVM, true, nullptr},
  // VOP3P operand-select / negation arrays.
  {"op_sel", AMDGPUOperand::ImmTyOpSel, false, nullptr},
  {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
  {"neg_lo", AMDGPUOperand::ImmTyNegLo, false, nullptr},
  {"neg_hi", AMDGPUOperand::ImmTyNegHi, false, nullptr}
};
Tom Stellard45bb48e2015-06-13 03:28:10 +00003614
/// Try every entry of AMDGPUOptionalOperandTable against the current token,
/// dispatching to the specialized parser each operand type needs; the first
/// entry that does not report NoMatch decides the result.
OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else {
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}
3644
Matt Arsenault12c53892016-11-15 19:58:54 +00003645OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003646 StringRef Name = Parser.getTok().getString();
3647 if (Name == "mul") {
Matt Arsenault12c53892016-11-15 19:58:54 +00003648 return parseIntWithPrefix("mul", Operands,
3649 AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003650 }
Matt Arsenault12c53892016-11-15 19:58:54 +00003651
3652 if (Name == "div") {
3653 return parseIntWithPrefix("div", Operands,
3654 AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
3655 }
3656
3657 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003658}
3659
Tom Stellarda90b9522016-02-11 03:28:15 +00003660void AMDGPUAsmParser::cvtId(MCInst &Inst, const OperandVector &Operands) {
3661 unsigned I = 1;
Tom Stellard88e0b252015-10-06 15:57:53 +00003662 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00003663 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00003664 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
3665 }
3666 for (unsigned E = Operands.size(); I != E; ++I)
3667 ((AMDGPUOperand &)*Operands[I]).addRegOrImmOperands(Inst, 1);
3668}
3669
3670void AMDGPUAsmParser::cvtVOP3_2_mod(MCInst &Inst, const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003671 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
3672 if (TSFlags & SIInstrFlags::VOP3) {
Tom Stellarda90b9522016-02-11 03:28:15 +00003673 cvtVOP3(Inst, Operands);
3674 } else {
3675 cvtId(Inst, Operands);
3676 }
3677}
3678
Sam Koltona3ec5c12016-10-07 14:46:06 +00003679static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
3680 // 1. This operand is input modifiers
3681 return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
3682 // 2. This is not last operand
3683 && Desc.NumOperands > (OpNum + 1)
3684 // 3. Next operand is register class
3685 && Desc.OpInfo[OpNum + 1].RegClass != -1
3686 // 4. Next register is not tied to any other operand
3687 && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
3688}
3689
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003690void AMDGPUAsmParser::cvtVOP3Impl(MCInst &Inst, const OperandVector &Operands,
3691 OptionalImmIndexMap &OptionalIdx) {
Tom Stellarda90b9522016-02-11 03:28:15 +00003692 unsigned I = 1;
3693 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
Tom Stellarde9934512016-02-11 18:25:26 +00003694 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
Tom Stellarda90b9522016-02-11 03:28:15 +00003695 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
Tom Stellard88e0b252015-10-06 15:57:53 +00003696 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00003697
Tom Stellarda90b9522016-02-11 03:28:15 +00003698 for (unsigned E = Operands.size(); I != E; ++I) {
3699 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
Sam Koltona3ec5c12016-10-07 14:46:06 +00003700 if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton945231a2016-06-10 09:57:59 +00003701 Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
Nikolay Haustovea8febd2016-03-01 08:34:43 +00003702 } else if (Op.isImm()) {
3703 OptionalIdx[Op.getImmTy()] = I;
Tom Stellarda90b9522016-02-11 03:28:15 +00003704 } else {
Matt Arsenault92b355b2016-11-15 19:34:37 +00003705 llvm_unreachable("unhandled operand type");
Tom Stellard45bb48e2015-06-13 03:28:10 +00003706 }
Tom Stellarda90b9522016-02-11 03:28:15 +00003707 }
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003708}
3709
/// Convert a parsed VOP3 instruction: add defs and modified sources via
/// cvtVOP3Impl, then append the optional clamp and omod immediates in
/// encoding order.
void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  cvtVOP3Impl(Inst, Operands, OptionalIdx);

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  // we don't allow modifiers for this operand in assembler so src2_modifiers
  // should be 0
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_si ||
      Inst.getOpcode() == AMDGPU::V_MAC_F32_e64_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi) {
    auto it = Inst.begin();
    // Position the iterator at src2_modifiers. NOTE(review): the index is
    // looked up on the generic V_MAC_F*_e64 opcode, not the _si/_vi variant
    // being built — assumes both share the same operand order; confirm when
    // adding new subtarget variants.
    std::advance(
      it,
      AMDGPU::getNamedOperandIdx(Inst.getOpcode() == AMDGPU::V_MAC_F16_e64_vi ?
                                 AMDGPU::V_MAC_F16_e64 :
                                 AMDGPU::V_MAC_F32_e64,
                                 AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}
3737
/// Convert a parsed VOP3P (packed) instruction. The operands are first
/// converted as if this were a regular VOP3 instruction; the packed
/// op_sel/op_sel_hi and neg_lo/neg_hi immediates are then redistributed,
/// bit J per source J, into the per-source src*_modifiers operands.
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;

  cvtVOP3Impl(Inst, Operands, OptIdx);

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers
  int Opc = Inst.getOpcode();

  // Not every VOP3P opcode has a clamp operand.
  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyClampSI);
  }

  // An omitted op_sel_hi defaults to -1 (all bits set).
  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);
  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi, -1);

  // neg_lo/neg_hi come as a pair; their presence is keyed off neg_lo.
  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);

  // NOTE(review): OpSelIdx/OpSelHiIdx are used unchecked below, so every
  // opcode routed here is assumed to have op_sel and op_sel_hi operands —
  // confirm when wiring up new VOP3P opcodes.
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  // Fold bit J of each packed immediate into source J's modifiers operand.
  // Sources past the opcode's operand list (e.g. two-source ops) are skipped.
  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(ModVal);
  }
}
3805
Sam Koltondfa29f72016-03-09 12:29:31 +00003806//===----------------------------------------------------------------------===//
3807// dpp
3808//===----------------------------------------------------------------------===//
3809
3810bool AMDGPUOperand::isDPPCtrl() const {
3811 bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
3812 if (result) {
3813 int64_t Imm = getImm();
3814 return ((Imm >= 0x000) && (Imm <= 0x0ff)) ||
3815 ((Imm >= 0x101) && (Imm <= 0x10f)) ||
3816 ((Imm >= 0x111) && (Imm <= 0x11f)) ||
3817 ((Imm >= 0x121) && (Imm <= 0x12f)) ||
3818 (Imm == 0x130) ||
3819 (Imm == 0x134) ||
3820 (Imm == 0x138) ||
3821 (Imm == 0x13c) ||
3822 (Imm == 0x140) ||
3823 (Imm == 0x141) ||
3824 (Imm == 0x142) ||
3825 (Imm == 0x143);
3826 }
3827 return false;
3828}
3829
Matt Arsenaultcc88ce32016-10-12 18:00:51 +00003830bool AMDGPUOperand::isGPRIdxMode() const {
3831 return isImm() && isUInt<4>(getImm());
3832}
3833
// Parse a dpp_ctrl operand and fold it into the 9-bit DPP control encoding:
//   quad_perm:[a,b,c,d]             -> 0x000-0x0ff (two bits per lane select)
//   row_shl/row_shr/row_ror:1-15    -> 0x101-0x10f / 0x111-0x11f / 0x121-0x12f
//   wave_shl/rol/shr/ror:1          -> 0x130 / 0x134 / 0x138 / 0x13C
//   row_mirror / row_half_mirror    -> 0x140 / 0x141
//   row_bcast:15 / row_bcast:31     -> 0x142 / 0x143
OperandMatchResultTy
AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  StringRef Prefix;
  int64_t Int;

  if (getLexer().getKind() == AsmToken::Identifier) {
    Prefix = Parser.getTok().getString();
  } else {
    return MatchOperand_NoMatch;
  }

  // The two no-argument forms consume only the identifier itself.
  if (Prefix == "row_mirror") {
    Int = 0x140;
    Parser.Lex();
  } else if (Prefix == "row_half_mirror") {
    Int = 0x141;
    Parser.Lex();
  } else {
    // Check to prevent parseDPPCtrlOps from eating invalid tokens
    if (Prefix != "quad_perm"
        && Prefix != "row_shl"
        && Prefix != "row_shr"
        && Prefix != "row_ror"
        && Prefix != "wave_shl"
        && Prefix != "wave_rol"
        && Prefix != "wave_shr"
        && Prefix != "wave_ror"
        && Prefix != "row_bcast") {
      return MatchOperand_NoMatch;
    }

    // All remaining forms require "<prefix>:".
    Parser.Lex();
    if (getLexer().isNot(AsmToken::Colon))
      return MatchOperand_ParseFail;

    if (Prefix == "quad_perm") {
      // quad_perm:[%d,%d,%d,%d]
      Parser.Lex();
      if (getLexer().isNot(AsmToken::LBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

      // First lane select occupies bits [1:0].
      if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
        return MatchOperand_ParseFail;

      // Remaining three lane selects occupy bits [3:2], [5:4], [7:6].
      for (int i = 0; i < 3; ++i) {
        if (getLexer().isNot(AsmToken::Comma))
          return MatchOperand_ParseFail;
        Parser.Lex();

        int64_t Temp;
        if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
          return MatchOperand_ParseFail;
        const int shift = i*2 + 2;
        Int += (Temp << shift);
      }

      if (getLexer().isNot(AsmToken::RBrac))
        return MatchOperand_ParseFail;
      Parser.Lex();

    } else {
      // sel:%d — parse the argument, then validate its range per prefix.
      Parser.Lex();
      if (getParser().parseAbsoluteExpression(Int))
        return MatchOperand_ParseFail;

      if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
        Int |= 0x100;
      } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
        Int |= 0x110;
      } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
        Int |= 0x120;
      } else if (Prefix == "wave_shl" && 1 == Int) {
        Int = 0x130;
      } else if (Prefix == "wave_rol" && 1 == Int) {
        Int = 0x134;
      } else if (Prefix == "wave_shr" && 1 == Int) {
        Int = 0x138;
      } else if (Prefix == "wave_ror" && 1 == Int) {
        Int = 0x13C;
      } else if (Prefix == "row_bcast") {
        if (Int == 15) {
          Int = 0x142;
        } else if (Int == 31) {
          Int = 0x143;
        } else {
          return MatchOperand_ParseFail;
        }
      } else {
        return MatchOperand_ParseFail;
      }
    }
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
3933
Sam Kolton5f10a132016-05-06 11:31:17 +00003934AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003935 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003936}
3937
Sam Kolton5f10a132016-05-06 11:31:17 +00003938AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003939 return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
Sam Koltondfa29f72016-03-09 12:29:31 +00003940}
3941
Sam Kolton5f10a132016-05-06 11:31:17 +00003942AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003943 return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Kolton5f10a132016-05-06 11:31:17 +00003944}
3945
3946void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
Sam Koltondfa29f72016-03-09 12:29:31 +00003947 OptionalImmIndexMap OptionalIdx;
3948
3949 unsigned I = 1;
3950 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
3951 for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
3952 ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
3953 }
3954
3955 for (unsigned E = Operands.size(); I != E; ++I) {
3956 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
3957 // Add the register arguments
Sam Koltone66365e2016-12-27 10:06:42 +00003958 if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
Sam Kolton07dbde22017-01-20 10:01:25 +00003959 // VOP2b (v_add_u32, v_sub_u32 ...) dpp use "vcc" token.
Sam Koltone66365e2016-12-27 10:06:42 +00003960 // Skip it.
3961 continue;
3962 } if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
Sam Kolton9772eb32017-01-11 11:46:30 +00003963 Op.addRegWithFPInputModsOperands(Inst, 2);
Sam Koltondfa29f72016-03-09 12:29:31 +00003964 } else if (Op.isDPPCtrl()) {
3965 Op.addImmOperands(Inst, 1);
3966 } else if (Op.isImm()) {
3967 // Handle optional arguments
3968 OptionalIdx[Op.getImmTy()] = I;
3969 } else {
3970 llvm_unreachable("Invalid operand type");
3971 }
3972 }
3973
Sam Koltondfa29f72016-03-09 12:29:31 +00003974 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
3975 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
3976 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
Sam Koltona3ec5c12016-10-07 14:46:06 +00003977
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003978 // special case v_mac_{f16, f32}:
Sam Koltona3ec5c12016-10-07 14:46:06 +00003979 // it has src2 register operand that is tied to dst operand
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003980 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_dpp ||
3981 Inst.getOpcode() == AMDGPU::V_MAC_F16_dpp) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003982 auto it = Inst.begin();
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00003983 std::advance(
3984 it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
Sam Koltona3ec5c12016-10-07 14:46:06 +00003985 Inst.insert(it, Inst.getOperand(0)); // src2 = dst
3986 }
Sam Koltondfa29f72016-03-09 12:29:31 +00003987}
Nikolay Haustov5bf46ac12016-03-04 10:39:50 +00003988
Sam Kolton3025e7f2016-04-26 13:33:56 +00003989//===----------------------------------------------------------------------===//
3990// sdwa
3991//===----------------------------------------------------------------------===//
3992
Alex Bradbury58eba092016-11-01 16:32:05 +00003993OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003994AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
3995 AMDGPUOperand::ImmTy Type) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00003996 using namespace llvm::AMDGPU::SDWA;
3997
Sam Kolton3025e7f2016-04-26 13:33:56 +00003998 SMLoc S = Parser.getTok().getLoc();
3999 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00004000 OperandMatchResultTy res;
Matt Arsenault37fefd62016-06-10 02:18:02 +00004001
Sam Kolton05ef1c92016-06-03 10:27:37 +00004002 res = parseStringWithPrefix(Prefix, Value);
4003 if (res != MatchOperand_Success) {
4004 return res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00004005 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00004006
Sam Kolton3025e7f2016-04-26 13:33:56 +00004007 int64_t Int;
4008 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00004009 .Case("BYTE_0", SdwaSel::BYTE_0)
4010 .Case("BYTE_1", SdwaSel::BYTE_1)
4011 .Case("BYTE_2", SdwaSel::BYTE_2)
4012 .Case("BYTE_3", SdwaSel::BYTE_3)
4013 .Case("WORD_0", SdwaSel::WORD_0)
4014 .Case("WORD_1", SdwaSel::WORD_1)
4015 .Case("DWORD", SdwaSel::DWORD)
Sam Kolton3025e7f2016-04-26 13:33:56 +00004016 .Default(0xffffffff);
4017 Parser.Lex(); // eat last token
4018
4019 if (Int == 0xffffffff) {
4020 return MatchOperand_ParseFail;
4021 }
4022
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004023 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
Sam Kolton3025e7f2016-04-26 13:33:56 +00004024 return MatchOperand_Success;
4025}
4026
Alex Bradbury58eba092016-11-01 16:32:05 +00004027OperandMatchResultTy
Sam Kolton3025e7f2016-04-26 13:33:56 +00004028AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00004029 using namespace llvm::AMDGPU::SDWA;
4030
Sam Kolton3025e7f2016-04-26 13:33:56 +00004031 SMLoc S = Parser.getTok().getLoc();
4032 StringRef Value;
Alex Bradbury58eba092016-11-01 16:32:05 +00004033 OperandMatchResultTy res;
Sam Kolton3025e7f2016-04-26 13:33:56 +00004034
4035 res = parseStringWithPrefix("dst_unused", Value);
4036 if (res != MatchOperand_Success) {
4037 return res;
4038 }
4039
4040 int64_t Int;
4041 Int = StringSwitch<int64_t>(Value)
Sam Koltona3ec5c12016-10-07 14:46:06 +00004042 .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
4043 .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
4044 .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
Sam Kolton3025e7f2016-04-26 13:33:56 +00004045 .Default(0xffffffff);
4046 Parser.Lex(); // eat last token
4047
4048 if (Int == 0xffffffff) {
4049 return MatchOperand_ParseFail;
4050 }
4051
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004052 Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
Sam Kolton3025e7f2016-04-26 13:33:56 +00004053 return MatchOperand_Success;
4054}
4055
// Convert a VOP1-encoded SDWA instruction (dst_sel/dst_unused/src0_sel).
void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}
4059
// Convert a VOP2-encoded SDWA instruction
// (dst_sel/dst_unused/src0_sel/src1_sel).
void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}
4063
// Convert a VOPC-encoded SDWA instruction (src0_sel/src1_sel only).
void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC);
}
4067
/// Common SDWA conversion. Adds defs, skips the "vcc" token used as dst by
/// VOPC and VOP2b forms, emits sources with their input modifiers, then
/// appends clamp and the optional dst_sel/dst_unused/src*_sel immediates
/// required by the encoding selected via BasicInstType (VOP1/VOP2/VOPC).
void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType) {
  using namespace llvm::AMDGPU::SDWA;
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if ((BasicInstType == SIInstrFlags::VOPC ||
        BasicInstType == SIInstrFlags::VOP2)&&
        Op.isReg() &&
        Op.Reg.RegNo == AMDGPU::VCC) {
      // VOPC and VOP2b (v_add_u32, v_sub_u32 ...) sdwa use "vcc" token as dst.
      // Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      // Source plus its modifiers immediate (two MCInst operands).
      Op.addRegWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  // clamp defaults to 0 when omitted.
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);

  if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
    // V_NOP_sdwa_vi has no optional sdwa arguments
    // Each encoding takes a different subset of sel/unused operands, all
    // defaulting to DWORD / UNUSED_PRESERVE when not written in the source.
    switch (BasicInstType) {
    case SIInstrFlags::VOP1:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOP2:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    case SIInstrFlags::VOPC:
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // special case v_mac_{f16, f32}:
  // it has src2 register operand that is tied to dst operand
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
      it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }

}
Nikolay Haustov2f684f12016-02-26 09:51:05 +00004138
Tom Stellard45bb48e2015-06-13 03:28:10 +00004139/// Force static initialization.
4140extern "C" void LLVMInitializeAMDGPUAsmParser() {
Mehdi Aminif42454b2016-10-09 23:00:34 +00004141 RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
4142 RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
Tom Stellard45bb48e2015-06-13 03:28:10 +00004143}
4144
4145#define GET_REGISTER_MATCHER
4146#define GET_MATCHER_IMPLEMENTATION
4147#include "AMDGPUGenAsmMatcher.inc"
Sam Kolton11de3702016-05-24 12:38:33 +00004148
// This function should be defined after the auto-generated include so that
// the MatchClassKind enum is defined.
4151unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
4152 unsigned Kind) {
4153 // Tokens like "glc" would be parsed as immediate operands in ParseOperand().
Matt Arsenault37fefd62016-06-10 02:18:02 +00004154 // But MatchInstructionImpl() expects to meet token and fails to validate
Sam Kolton11de3702016-05-24 12:38:33 +00004155 // operand. This method checks if we are given immediate operand but expect to
4156 // get corresponding token.
4157 AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
4158 switch (Kind) {
4159 case MCK_addr64:
4160 return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
4161 case MCK_gds:
4162 return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
4163 case MCK_glc:
4164 return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
4165 case MCK_idxen:
4166 return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
4167 case MCK_offen:
4168 return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004169 case MCK_SSrcB32:
Tom Stellard89049702016-06-15 02:54:14 +00004170 // When operands have expression values, they will return true for isToken,
4171 // because it is not possible to distinguish between a token and an
4172 // expression at parse time. MatchInstructionImpl() will always try to
4173 // match an operand as a token, when isToken returns true, and when the
4174 // name of the expression is not a valid token, the match will fail,
4175 // so we need to handle it here.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00004176 return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
4177 case MCK_SSrcF32:
4178 return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
Artem Tamazov53c9de02016-07-11 12:07:18 +00004179 case MCK_SoppBrTarget:
4180 return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004181 case MCK_VReg32OrOff:
4182 return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
Matt Arsenault0e8a2992016-12-15 20:40:20 +00004183 case MCK_InterpSlot:
4184 return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
4185 case MCK_Attr:
4186 return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
4187 case MCK_AttrChan:
4188 return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00004189 default:
4190 return Match_InvalidOperand;
Sam Kolton11de3702016-05-24 12:38:33 +00004191 }
4192}