//===- AMDGPUAsmParser.cpp - Parse SI asm to MCInst instructions ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "AMDKernelCodeT.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "MCTargetDesc/AMDGPUTargetStreamer.h"
#include "SIDefines.h"
#include "SIInstrInfo.h"
#include "Utils/AMDGPUAsmUtils.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "Utils/AMDKernelCodeTUtils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCAsmParserExtension.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/AMDGPUMetadata.h"
#include "llvm/Support/AMDHSAKernelDescriptor.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SMLoc.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <map>
#include <memory>
#include <string>

using namespace llvm;
using namespace llvm::AMDGPU;
using namespace llvm::amdhsa;

namespace {

class AMDGPUAsmParser;

enum RegisterKind { IS_UNKNOWN, IS_VGPR, IS_SGPR, IS_TTMP, IS_SPECIAL };

//===----------------------------------------------------------------------===//
// Operand
//===----------------------------------------------------------------------===//

class AMDGPUOperand : public MCParsedAsmOperand {
  enum KindTy {
    Token,
    Immediate,
    Register,
    Expression
  } Kind;

  SMLoc StartLoc, EndLoc;
  const AMDGPUAsmParser *AsmParser;

public:
  AMDGPUOperand(KindTy Kind_, const AMDGPUAsmParser *AsmParser_)
    : MCParsedAsmOperand(), Kind(Kind_), AsmParser(AsmParser_) {}

  using Ptr = std::unique_ptr<AMDGPUOperand>;

  struct Modifiers {
    bool Abs = false;
    bool Neg = false;
    bool Sext = false;

    bool hasFPModifiers() const { return Abs || Neg; }
    bool hasIntModifiers() const { return Sext; }
    bool hasModifiers() const { return hasFPModifiers() || hasIntModifiers(); }

    int64_t getFPModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Abs ? SISrcMods::ABS : 0;
      Operand |= Neg ? SISrcMods::NEG : 0;
      return Operand;
    }

    int64_t getIntModifiersOperand() const {
      int64_t Operand = 0;
      Operand |= Sext ? SISrcMods::SEXT : 0;
      return Operand;
    }

    int64_t getModifiersOperand() const {
      assert(!(hasFPModifiers() && hasIntModifiers())
           && "fp and int modifiers should not be used simultaneously");
      if (hasFPModifiers()) {
        return getFPModifiersOperand();
      } else if (hasIntModifiers()) {
        return getIntModifiersOperand();
      } else {
        return 0;
      }
    }

    friend raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods);
  };
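
  // A minimal usage sketch (illustrative only, not called here): for a source
  // operand written as "-|v0|" the parser records Abs = Neg = true, and the
  // value packed into the modifier operand is the OR of the SISrcMods bits:
  //
  //   Modifiers Mods;
  //   Mods.Abs = true;
  //   Mods.Neg = true;
  //   int64_t Packed = Mods.getModifiersOperand();
  //   // Packed == (SISrcMods::ABS | SISrcMods::NEG)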

  enum ImmTy {
    ImmTyNone,
    ImmTyGDS,
    ImmTyLDS,
    ImmTyOffen,
    ImmTyIdxen,
    ImmTyAddr64,
    ImmTyOffset,
    ImmTyInstOffset,
    ImmTyOffset0,
    ImmTyOffset1,
    ImmTyGLC,
    ImmTySLC,
    ImmTyTFE,
    ImmTyD16,
    ImmTyClampSI,
    ImmTyOModSI,
    ImmTyDppCtrl,
    ImmTyDppRowMask,
    ImmTyDppBankMask,
    ImmTyDppBoundCtrl,
    ImmTySdwaDstSel,
    ImmTySdwaSrc0Sel,
    ImmTySdwaSrc1Sel,
    ImmTySdwaDstUnused,
    ImmTyDMask,
    ImmTyUNorm,
    ImmTyDA,
    ImmTyR128,
    ImmTyLWE,
    ImmTyExpTgt,
    ImmTyExpCompr,
    ImmTyExpVM,
    ImmTyDFMT,
    ImmTyNFMT,
    ImmTyHwreg,
    ImmTyOff,
    ImmTySendMsg,
    ImmTyInterpSlot,
    ImmTyInterpAttr,
    ImmTyAttrChan,
    ImmTyOpSel,
    ImmTyOpSelHi,
    ImmTyNegLo,
    ImmTyNegHi,
    ImmTySwizzle,
    ImmTyHigh
  };

  struct TokOp {
    const char *Data;
    unsigned Length;
  };

  struct ImmOp {
    int64_t Val;
    ImmTy Type;
    bool IsFPImm;
    Modifiers Mods;
  };

  struct RegOp {
    unsigned RegNo;
    bool IsForcedVOP3;
    Modifiers Mods;
  };

  union {
    TokOp Tok;
    ImmOp Imm;
    RegOp Reg;
    const MCExpr *Expr;
  };

  bool isToken() const override {
    if (Kind == Token)
      return true;

    if (Kind != Expression || !Expr)
      return false;

    // When parsing operands, we can't always tell if something was meant to be
    // a token, like 'gds', or an expression that references a global variable.
    // In this case, we assume the string is an expression, and if we need to
    // interpret it as a token, then we treat the symbol name as the token.
    return isa<MCSymbolRefExpr>(Expr);
  }
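
  // For example, in "ds_read_b32 v0, v1 gds" the trailing "gds" may be lexed
  // as an identifier and reach us as an MCSymbolRefExpr; the check above lets
  // it still match as a token, and getToken() recovers the symbol name.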

  bool isImm() const override {
    return Kind == Immediate;
  }

  bool isInlinableImm(MVT type) const;
  bool isLiteralImm(MVT type) const;

  bool isRegKind() const {
    return Kind == Register;
  }

  bool isReg() const override {
    return isRegKind() && !hasModifiers();
  }

  bool isRegOrImmWithInputMods(MVT type) const {
    return isRegKind() || isInlinableImm(type);
  }

  bool isRegOrImmWithInt16InputMods() const {
    return isRegOrImmWithInputMods(MVT::i16);
  }

  bool isRegOrImmWithInt32InputMods() const {
    return isRegOrImmWithInputMods(MVT::i32);
  }

  bool isRegOrImmWithInt64InputMods() const {
    return isRegOrImmWithInputMods(MVT::i64);
  }

  bool isRegOrImmWithFP16InputMods() const {
    return isRegOrImmWithInputMods(MVT::f16);
  }

  bool isRegOrImmWithFP32InputMods() const {
    return isRegOrImmWithInputMods(MVT::f32);
  }

  bool isRegOrImmWithFP64InputMods() const {
    return isRegOrImmWithInputMods(MVT::f64);
  }

  bool isVReg() const {
    return isRegClass(AMDGPU::VGPR_32RegClassID) ||
           isRegClass(AMDGPU::VReg_64RegClassID) ||
           isRegClass(AMDGPU::VReg_96RegClassID) ||
           isRegClass(AMDGPU::VReg_128RegClassID) ||
           isRegClass(AMDGPU::VReg_256RegClassID) ||
           isRegClass(AMDGPU::VReg_512RegClassID);
  }

  bool isVReg32OrOff() const {
    return isOff() || isRegClass(AMDGPU::VGPR_32RegClassID);
  }

  bool isSDWAOperand(MVT type) const;
  bool isSDWAFP16Operand() const;
  bool isSDWAFP32Operand() const;
  bool isSDWAInt16Operand() const;
  bool isSDWAInt32Operand() const;

  bool isImmTy(ImmTy ImmT) const {
    return isImm() && Imm.Type == ImmT;
  }

  bool isImmModifier() const {
    return isImm() && Imm.Type != ImmTyNone;
  }

  bool isClampSI() const { return isImmTy(ImmTyClampSI); }
  bool isOModSI() const { return isImmTy(ImmTyOModSI); }
  bool isDMask() const { return isImmTy(ImmTyDMask); }
  bool isUNorm() const { return isImmTy(ImmTyUNorm); }
  bool isDA() const { return isImmTy(ImmTyDA); }
  bool isR128() const { return isImmTy(ImmTyR128); }
  bool isLWE() const { return isImmTy(ImmTyLWE); }
  bool isOff() const { return isImmTy(ImmTyOff); }
  bool isExpTgt() const { return isImmTy(ImmTyExpTgt); }
  bool isExpVM() const { return isImmTy(ImmTyExpVM); }
  bool isExpCompr() const { return isImmTy(ImmTyExpCompr); }
  bool isOffen() const { return isImmTy(ImmTyOffen); }
  bool isIdxen() const { return isImmTy(ImmTyIdxen); }
  bool isAddr64() const { return isImmTy(ImmTyAddr64); }
  bool isOffset() const { return isImmTy(ImmTyOffset) && isUInt<16>(getImm()); }
  bool isOffset0() const { return isImmTy(ImmTyOffset0) && isUInt<16>(getImm()); }
  bool isOffset1() const { return isImmTy(ImmTyOffset1) && isUInt<8>(getImm()); }

  bool isOffsetU12() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isUInt<12>(getImm()); }
  bool isOffsetS13() const { return (isImmTy(ImmTyOffset) || isImmTy(ImmTyInstOffset)) && isInt<13>(getImm()); }
  bool isGDS() const { return isImmTy(ImmTyGDS); }
  bool isLDS() const { return isImmTy(ImmTyLDS); }
  bool isGLC() const { return isImmTy(ImmTyGLC); }
  bool isSLC() const { return isImmTy(ImmTySLC); }
  bool isTFE() const { return isImmTy(ImmTyTFE); }
  bool isD16() const { return isImmTy(ImmTyD16); }
  bool isDFMT() const { return isImmTy(ImmTyDFMT) && isUInt<8>(getImm()); }
  bool isNFMT() const { return isImmTy(ImmTyNFMT) && isUInt<8>(getImm()); }
  bool isBankMask() const { return isImmTy(ImmTyDppBankMask); }
  bool isRowMask() const { return isImmTy(ImmTyDppRowMask); }
  bool isBoundCtrl() const { return isImmTy(ImmTyDppBoundCtrl); }
  bool isSDWADstSel() const { return isImmTy(ImmTySdwaDstSel); }
  bool isSDWASrc0Sel() const { return isImmTy(ImmTySdwaSrc0Sel); }
  bool isSDWASrc1Sel() const { return isImmTy(ImmTySdwaSrc1Sel); }
  bool isSDWADstUnused() const { return isImmTy(ImmTySdwaDstUnused); }
  bool isInterpSlot() const { return isImmTy(ImmTyInterpSlot); }
  bool isInterpAttr() const { return isImmTy(ImmTyInterpAttr); }
  bool isAttrChan() const { return isImmTy(ImmTyAttrChan); }
  bool isOpSel() const { return isImmTy(ImmTyOpSel); }
  bool isOpSelHi() const { return isImmTy(ImmTyOpSelHi); }
  bool isNegLo() const { return isImmTy(ImmTyNegLo); }
  bool isNegHi() const { return isImmTy(ImmTyNegHi); }
  bool isHigh() const { return isImmTy(ImmTyHigh); }

  bool isMod() const {
    return isClampSI() || isOModSI();
  }

  bool isRegOrImm() const {
    return isReg() || isImm();
  }

  bool isRegClass(unsigned RCID) const;

  bool isRegOrInlineNoMods(unsigned RCID, MVT type) const {
    return (isRegClass(RCID) || isInlinableImm(type)) && !hasModifiers();
  }

  bool isSCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i16);
  }

  bool isSCSrcV2B16() const {
    return isSCSrcB16();
  }

  bool isSCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::i32);
  }

  bool isSCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::i64);
  }

  bool isSCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f16);
  }

  bool isSCSrcV2F16() const {
    return isSCSrcF16();
  }

  bool isSCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_32RegClassID, MVT::f32);
  }

  bool isSCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::SReg_64RegClassID, MVT::f64);
  }

  bool isSSrcB32() const {
    return isSCSrcB32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isSSrcB16() const {
    return isSCSrcB16() || isLiteralImm(MVT::i16);
  }

  bool isSSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isSSrcB16();
  }

  bool isSSrcB64() const {
    // TODO: Find out how SALU supports extension of 32-bit literals to 64 bits.
    // See isVSrc64().
    return isSCSrcB64() || isLiteralImm(MVT::i64);
  }

  bool isSSrcF32() const {
    return isSCSrcB32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isSSrcF64() const {
    return isSCSrcB64() || isLiteralImm(MVT::f64);
  }

  bool isSSrcF16() const {
    return isSCSrcB16() || isLiteralImm(MVT::f16);
  }

  bool isSSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isSSrcF16();
  }

  bool isVCSrcB32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i32);
  }

  bool isVCSrcB64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::i64);
  }

  bool isVCSrcB16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::i16);
  }

  bool isVCSrcV2B16() const {
    return isVCSrcB16();
  }

  bool isVCSrcF32() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f32);
  }

  bool isVCSrcF64() const {
    return isRegOrInlineNoMods(AMDGPU::VS_64RegClassID, MVT::f64);
  }

  bool isVCSrcF16() const {
    return isRegOrInlineNoMods(AMDGPU::VS_32RegClassID, MVT::f16);
  }

  bool isVCSrcV2F16() const {
    return isVCSrcF16();
  }

  bool isVSrcB32() const {
    return isVCSrcF32() || isLiteralImm(MVT::i32) || isExpr();
  }

  bool isVSrcB64() const {
    return isVCSrcF64() || isLiteralImm(MVT::i64);
  }

  bool isVSrcB16() const {
    return isVCSrcF16() || isLiteralImm(MVT::i16);
  }

  bool isVSrcV2B16() const {
    llvm_unreachable("cannot happen");
    return isVSrcB16();
  }

  bool isVSrcF32() const {
    return isVCSrcF32() || isLiteralImm(MVT::f32) || isExpr();
  }

  bool isVSrcF64() const {
    return isVCSrcF64() || isLiteralImm(MVT::f64);
  }

  bool isVSrcF16() const {
    return isVCSrcF16() || isLiteralImm(MVT::f16);
  }

  bool isVSrcV2F16() const {
    llvm_unreachable("cannot happen");
    return isVSrcF16();
  }

  bool isKImmFP32() const {
    return isLiteralImm(MVT::f32);
  }

  bool isKImmFP16() const {
    return isLiteralImm(MVT::f16);
  }

  bool isMem() const override {
    return false;
  }

  bool isExpr() const {
    return Kind == Expression;
  }

  bool isSoppBrTarget() const {
    return isExpr() || isImm();
  }

  bool isSWaitCnt() const;
  bool isHwreg() const;
  bool isSendMsg() const;
  bool isSwizzle() const;
  bool isSMRDOffset8() const;
  bool isSMRDOffset20() const;
  bool isSMRDLiteralOffset() const;
  bool isDPPCtrl() const;
  bool isGPRIdxMode() const;
  bool isS16Imm() const;
  bool isU16Imm() const;

  StringRef getExpressionAsToken() const {
    assert(isExpr());
    const MCSymbolRefExpr *S = cast<MCSymbolRefExpr>(Expr);
    return S->getSymbol().getName();
  }

  StringRef getToken() const {
    assert(isToken());

    if (Kind == Expression)
      return getExpressionAsToken();

    return StringRef(Tok.Data, Tok.Length);
  }

  int64_t getImm() const {
    assert(isImm());
    return Imm.Val;
  }

  ImmTy getImmTy() const {
    assert(isImm());
    return Imm.Type;
  }

  unsigned getReg() const override {
    return Reg.RegNo;
  }

  SMLoc getStartLoc() const override {
    return StartLoc;
  }

  SMLoc getEndLoc() const override {
    return EndLoc;
  }

  SMRange getLocRange() const {
    return SMRange(StartLoc, EndLoc);
  }

  Modifiers getModifiers() const {
    assert(isRegKind() || isImmTy(ImmTyNone));
    return isRegKind() ? Reg.Mods : Imm.Mods;
  }

  void setModifiers(Modifiers Mods) {
    assert(isRegKind() || isImmTy(ImmTyNone));
    if (isRegKind())
      Reg.Mods = Mods;
    else
      Imm.Mods = Mods;
  }

  bool hasModifiers() const {
    return getModifiers().hasModifiers();
  }

  bool hasFPModifiers() const {
    return getModifiers().hasFPModifiers();
  }

  bool hasIntModifiers() const {
    return getModifiers().hasIntModifiers();
  }

  uint64_t applyInputFPModifiers(uint64_t Val, unsigned Size) const;

  void addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers = true) const;

  void addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const;

  template <unsigned Bitwidth>
  void addKImmFPOperands(MCInst &Inst, unsigned N) const;

  void addKImmFP16Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<16>(Inst, N);
  }

  void addKImmFP32Operands(MCInst &Inst, unsigned N) const {
    addKImmFPOperands<32>(Inst, N);
  }

  void addRegOperands(MCInst &Inst, unsigned N) const;

  void addRegOrImmOperands(MCInst &Inst, unsigned N) const {
    if (isRegKind())
      addRegOperands(Inst, N);
    else if (isExpr())
      Inst.addOperand(MCOperand::createExpr(Expr));
    else
      addImmOperands(Inst, N);
  }

  void addRegOrImmWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    if (isRegKind()) {
      addRegOperands(Inst, N);
    } else {
      addImmOperands(Inst, N, false);
    }
  }
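
  // Note the emission order above: the packed modifier immediate is appended
  // to the MCInst first, followed by the register or immediate itself. This
  // matches the src0_modifiers/src0 operand pairing used by VOP3-style
  // instruction definitions.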

  void addRegOrImmWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegOrImmWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegOrImmWithInputModsOperands(Inst, N);
  }

  void addRegWithInputModsOperands(MCInst &Inst, unsigned N) const {
    Modifiers Mods = getModifiers();
    Inst.addOperand(MCOperand::createImm(Mods.getModifiersOperand()));
    assert(isRegKind());
    addRegOperands(Inst, N);
  }

  void addRegWithFPInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasIntModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addRegWithIntInputModsOperands(MCInst &Inst, unsigned N) const {
    assert(!hasFPModifiers());
    addRegWithInputModsOperands(Inst, N);
  }

  void addSoppBrTargetOperands(MCInst &Inst, unsigned N) const {
    if (isImm())
      addImmOperands(Inst, N);
    else {
      assert(isExpr());
      Inst.addOperand(MCOperand::createExpr(Expr));
    }
  }

  static void printImmTy(raw_ostream& OS, ImmTy Type) {
    switch (Type) {
    case ImmTyNone: OS << "None"; break;
    case ImmTyGDS: OS << "GDS"; break;
    case ImmTyLDS: OS << "LDS"; break;
    case ImmTyOffen: OS << "Offen"; break;
    case ImmTyIdxen: OS << "Idxen"; break;
    case ImmTyAddr64: OS << "Addr64"; break;
    case ImmTyOffset: OS << "Offset"; break;
    case ImmTyInstOffset: OS << "InstOffset"; break;
    case ImmTyOffset0: OS << "Offset0"; break;
    case ImmTyOffset1: OS << "Offset1"; break;
    case ImmTyGLC: OS << "GLC"; break;
    case ImmTySLC: OS << "SLC"; break;
    case ImmTyTFE: OS << "TFE"; break;
    case ImmTyD16: OS << "D16"; break;
    case ImmTyDFMT: OS << "DFMT"; break;
    case ImmTyNFMT: OS << "NFMT"; break;
    case ImmTyClampSI: OS << "ClampSI"; break;
    case ImmTyOModSI: OS << "OModSI"; break;
    case ImmTyDppCtrl: OS << "DppCtrl"; break;
    case ImmTyDppRowMask: OS << "DppRowMask"; break;
    case ImmTyDppBankMask: OS << "DppBankMask"; break;
    case ImmTyDppBoundCtrl: OS << "DppBoundCtrl"; break;
    case ImmTySdwaDstSel: OS << "SdwaDstSel"; break;
    case ImmTySdwaSrc0Sel: OS << "SdwaSrc0Sel"; break;
    case ImmTySdwaSrc1Sel: OS << "SdwaSrc1Sel"; break;
    case ImmTySdwaDstUnused: OS << "SdwaDstUnused"; break;
    case ImmTyDMask: OS << "DMask"; break;
    case ImmTyUNorm: OS << "UNorm"; break;
    case ImmTyDA: OS << "DA"; break;
    case ImmTyR128: OS << "R128"; break;
    case ImmTyLWE: OS << "LWE"; break;
    case ImmTyOff: OS << "Off"; break;
    case ImmTyExpTgt: OS << "ExpTgt"; break;
    case ImmTyExpCompr: OS << "ExpCompr"; break;
    case ImmTyExpVM: OS << "ExpVM"; break;
    case ImmTyHwreg: OS << "Hwreg"; break;
    case ImmTySendMsg: OS << "SendMsg"; break;
    case ImmTyInterpSlot: OS << "InterpSlot"; break;
    case ImmTyInterpAttr: OS << "InterpAttr"; break;
    case ImmTyAttrChan: OS << "AttrChan"; break;
    case ImmTyOpSel: OS << "OpSel"; break;
    case ImmTyOpSelHi: OS << "OpSelHi"; break;
    case ImmTyNegLo: OS << "NegLo"; break;
    case ImmTyNegHi: OS << "NegHi"; break;
    case ImmTySwizzle: OS << "Swizzle"; break;
    case ImmTyHigh: OS << "High"; break;
    }
  }

  void print(raw_ostream &OS) const override {
    switch (Kind) {
    case Register:
      OS << "<register " << getReg() << " mods: " << Reg.Mods << '>';
      break;
    case Immediate:
      OS << '<' << getImm();
      if (getImmTy() != ImmTyNone) {
        OS << " type: "; printImmTy(OS, getImmTy());
      }
      OS << " mods: " << Imm.Mods << '>';
      break;
    case Token:
      OS << '\'' << getToken() << '\'';
      break;
    case Expression:
      OS << "<expr " << *Expr << '>';
      break;
    }
  }

  static AMDGPUOperand::Ptr CreateImm(const AMDGPUAsmParser *AsmParser,
                                      int64_t Val, SMLoc Loc,
                                      ImmTy Type = ImmTyNone,
                                      bool IsFPImm = false) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Immediate, AsmParser);
    Op->Imm.Val = Val;
    Op->Imm.IsFPImm = IsFPImm;
    Op->Imm.Type = Type;
    Op->Imm.Mods = Modifiers();
    Op->StartLoc = Loc;
    Op->EndLoc = Loc;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateToken(const AMDGPUAsmParser *AsmParser,
                                        StringRef Str, SMLoc Loc,
                                        bool HasExplicitEncodingSize = true) {
    auto Res = llvm::make_unique<AMDGPUOperand>(Token, AsmParser);
    Res->Tok.Data = Str.data();
    Res->Tok.Length = Str.size();
    Res->StartLoc = Loc;
    Res->EndLoc = Loc;
    return Res;
  }

  static AMDGPUOperand::Ptr CreateReg(const AMDGPUAsmParser *AsmParser,
                                      unsigned RegNo, SMLoc S,
                                      SMLoc E,
                                      bool ForceVOP3) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Register, AsmParser);
    Op->Reg.RegNo = RegNo;
    Op->Reg.Mods = Modifiers();
    Op->Reg.IsForcedVOP3 = ForceVOP3;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static AMDGPUOperand::Ptr CreateExpr(const AMDGPUAsmParser *AsmParser,
                                       const class MCExpr *Expr, SMLoc S) {
    auto Op = llvm::make_unique<AMDGPUOperand>(Expression, AsmParser);
    Op->Expr = Expr;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

raw_ostream &operator <<(raw_ostream &OS, AMDGPUOperand::Modifiers Mods) {
  OS << "abs:" << Mods.Abs << " neg: " << Mods.Neg << " sext:" << Mods.Sext;
  return OS;
}

//===----------------------------------------------------------------------===//
// AsmParser
//===----------------------------------------------------------------------===//

// Holds info related to the current kernel, e.g. count of SGPRs used.
// Kernel scope begins at .amdgpu_hsa_kernel directive, ends at next
// .amdgpu_hsa_kernel or at EOF.
class KernelScopeInfo {
  int SgprIndexUnusedMin = -1;
  int VgprIndexUnusedMin = -1;
  MCContext *Ctx = nullptr;

  void usesSgprAt(int i) {
    if (i >= SgprIndexUnusedMin) {
      SgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.sgpr_count"));
        Sym->setVariableValue(MCConstantExpr::create(SgprIndexUnusedMin, *Ctx));
      }
    }
  }

  void usesVgprAt(int i) {
    if (i >= VgprIndexUnusedMin) {
      VgprIndexUnusedMin = ++i;
      if (Ctx) {
        MCSymbol * const Sym = Ctx->getOrCreateSymbol(Twine(".kernel.vgpr_count"));
        Sym->setVariableValue(MCConstantExpr::create(VgprIndexUnusedMin, *Ctx));
      }
    }
  }

public:
  KernelScopeInfo() = default;

  void initialize(MCContext &Context) {
    Ctx = &Context;
    usesSgprAt(SgprIndexUnusedMin = -1);
    usesVgprAt(VgprIndexUnusedMin = -1);
  }

  void usesRegister(RegisterKind RegKind, unsigned DwordRegIndex, unsigned RegWidth) {
    switch (RegKind) {
      case IS_SGPR: usesSgprAt(DwordRegIndex + RegWidth - 1); break;
      case IS_VGPR: usesVgprAt(DwordRegIndex + RegWidth - 1); break;
      default: break;
    }
  }
};
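
// Illustrative example of the tracking above: after
// usesRegister(IS_VGPR, /*DwordRegIndex=*/6, /*RegWidth=*/4), i.e. a use of
// v[6:9], VgprIndexUnusedMin becomes 10 and the .kernel.vgpr_count symbol is
// updated to that value.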

class AMDGPUAsmParser : public MCTargetAsmParser {
  MCAsmParser &Parser;

  // Number of extra operands parsed after the first optional operand.
  // This may be necessary to skip hardcoded mandatory operands.
  static const unsigned MAX_OPR_LOOKAHEAD = 8;

  unsigned ForcedEncodingSize = 0;
  bool ForcedDPP = false;
  bool ForcedSDWA = false;
  KernelScopeInfo KernelScope;

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "AMDGPUGenAsmMatcher.inc"

  /// }

private:
  bool ParseAsAbsoluteExpression(uint32_t &Ret);
  bool OutOfRangeError(SMRange Range);
  /// Calculate VGPR/SGPR blocks required for given target, reserved
  /// registers, and user-specified NextFreeXGPR values.
  ///
  /// \param Features [in] Target features, used for bug corrections.
  /// \param VCCUsed [in] Whether VCC special SGPR is reserved.
  /// \param FlatScrUsed [in] Whether FLAT_SCRATCH special SGPR is reserved.
  /// \param XNACKUsed [in] Whether XNACK_MASK special SGPR is reserved.
  /// \param NextFreeVGPR [in] Max VGPR number referenced, plus one.
  /// \param VGPRRange [in] Token range, used for VGPR diagnostics.
  /// \param NextFreeSGPR [in] Max SGPR number referenced, plus one.
  /// \param SGPRRange [in] Token range, used for SGPR diagnostics.
  /// \param VGPRBlocks [out] Result VGPR block count.
  /// \param SGPRBlocks [out] Result SGPR block count.
  bool calculateGPRBlocks(const FeatureBitset &Features, bool VCCUsed,
                          bool FlatScrUsed, bool XNACKUsed,
                          unsigned NextFreeVGPR, SMRange VGPRRange,
                          unsigned NextFreeSGPR, SMRange SGPRRange,
                          unsigned &VGPRBlocks, unsigned &SGPRBlocks);
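  // A sketch of the block rounding calculateGPRBlocks performs (assuming the
  // IsaInfo allocation-granule helpers; the real implementation also folds in
  // VCC/FLAT_SCRATCH/XNACK usage and target bug workarounds):
  //
  //   unsigned Granule = IsaInfo::getVGPRAllocGranule(Features);
  //   VGPRBlocks = alignTo(std::max(1u, NextFreeVGPR), Granule) / Granule - 1;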
  bool ParseDirectiveAMDGCNTarget();
  bool ParseDirectiveAMDHSAKernel();
  bool ParseDirectiveMajorMinor(uint32_t &Major, uint32_t &Minor);
  bool ParseDirectiveHSACodeObjectVersion();
  bool ParseDirectiveHSACodeObjectISA();
  bool ParseAMDKernelCodeTValue(StringRef ID, amd_kernel_code_t &Header);
  bool ParseDirectiveAMDKernelCodeT();
  bool subtargetHasRegister(const MCRegisterInfo &MRI, unsigned RegNo) const;
  bool ParseDirectiveAMDGPUHsaKernel();

  bool ParseDirectiveISAVersion();
  bool ParseDirectiveHSAMetadata();
  bool ParseDirectivePALMetadata();

  bool AddNextRegisterToList(unsigned& Reg, unsigned& RegWidth,
                             RegisterKind RegKind, unsigned Reg1,
                             unsigned RegNum);
  bool ParseAMDGPURegister(RegisterKind& RegKind, unsigned& Reg,
                           unsigned& RegNum, unsigned& RegWidth,
                           unsigned *DwordRegIndex);
  Optional<StringRef> getGprCountSymbolName(RegisterKind RegKind);
  void initializeGprCountSymbol(RegisterKind RegKind);
  bool updateGprCountSymbols(RegisterKind RegKind, unsigned DwordRegIndex,
                             unsigned RegWidth);
  void cvtMubufImpl(MCInst &Inst, const OperandVector &Operands,
                    bool IsAtomic, bool IsAtomicReturn, bool IsLds = false);
  void cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
                 bool IsGdsHardcoded);

public:
  enum AMDGPUMatchResultTy {
    Match_PreferE32 = FIRST_TARGET_MATCH_RESULT_TY
  };

  using OptionalImmIndexMap = std::map<AMDGPUOperand::ImmTy, unsigned>;

  AMDGPUAsmParser(const MCSubtargetInfo &STI, MCAsmParser &_Parser,
                  const MCInstrInfo &MII,
                  const MCTargetOptions &Options)
      : MCTargetAsmParser(Options, STI, MII), Parser(_Parser) {
    MCAsmParserExtension::Initialize(Parser);

    if (getFeatureBits().none()) {
      // Set default features.
      copySTI().ToggleFeature("SOUTHERN_ISLANDS");
    }

    setAvailableFeatures(ComputeAvailableFeatures(getFeatureBits()));

    {
      // TODO: make those pre-defined variables read-only.
      // Currently there is no suitable machinery in core llvm-mc for this.
      // MCSymbol::isRedefinable is intended for another purpose, and
      // AsmParser::parseDirectiveSet() cannot be specialized for a specific target.
      AMDGPU::IsaInfo::IsaVersion ISA =
          AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
      MCContext &Ctx = getContext();
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".amdgcn.gfx_generation_number"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
      } else {
        MCSymbol *Sym =
            Ctx.getOrCreateSymbol(Twine(".option.machine_version_major"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Major, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_minor"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Minor, Ctx));
        Sym = Ctx.getOrCreateSymbol(Twine(".option.machine_version_stepping"));
        Sym->setVariableValue(MCConstantExpr::create(ISA.Stepping, Ctx));
      }
      if (ISA.Major >= 6 && AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
        initializeGprCountSymbol(IS_VGPR);
        initializeGprCountSymbol(IS_SGPR);
      } else
        KernelScope.initialize(getContext());
    }
  }
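
  // A sketch of how assembly source can consume those pre-defined variables,
  // using ordinary llvm-mc conditional assembly (example directives assumed):
  //
  //   .if .option.machine_version_major >= 8
  //   flat_store_dword v[2:3], v4
  //   .endif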

  bool hasXNACK() const {
    return AMDGPU::hasXNACK(getSTI());
  }

  bool hasMIMG_R128() const {
    return AMDGPU::hasMIMG_R128(getSTI());
  }

  bool hasPackedD16() const {
    return AMDGPU::hasPackedD16(getSTI());
  }

  bool isSI() const {
    return AMDGPU::isSI(getSTI());
  }

  bool isCI() const {
    return AMDGPU::isCI(getSTI());
  }

  bool isVI() const {
    return AMDGPU::isVI(getSTI());
  }

  bool isGFX9() const {
    return AMDGPU::isGFX9(getSTI());
  }

  bool hasInv2PiInlineImm() const {
    return getFeatureBits()[AMDGPU::FeatureInv2PiInlineImm];
  }

  bool hasFlatOffsets() const {
    return getFeatureBits()[AMDGPU::FeatureFlatInstOffsets];
  }

  bool hasSGPR102_SGPR103() const {
    return !isVI();
  }

  bool hasIntClamp() const {
    return getFeatureBits()[AMDGPU::FeatureIntClamp];
  }

  AMDGPUTargetStreamer &getTargetStreamer() {
    MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
    return static_cast<AMDGPUTargetStreamer &>(TS);
  }

  const MCRegisterInfo *getMRI() const {
    // We need this const_cast because for some reason getContext() is not const
    // in MCAsmParser.
    return const_cast<AMDGPUAsmParser*>(this)->getContext().getRegisterInfo();
  }

  const MCInstrInfo *getMII() const {
    return &MII;
  }

  const FeatureBitset &getFeatureBits() const {
    return getSTI().getFeatureBits();
  }

  void setForcedEncodingSize(unsigned Size) { ForcedEncodingSize = Size; }
  void setForcedDPP(bool ForceDPP_) { ForcedDPP = ForceDPP_; }
  void setForcedSDWA(bool ForceSDWA_) { ForcedSDWA = ForceSDWA_; }

  unsigned getForcedEncodingSize() const { return ForcedEncodingSize; }
  bool isForcedVOP3() const { return ForcedEncodingSize == 64; }
  bool isForcedDPP() const { return ForcedDPP; }
  bool isForcedSDWA() const { return ForcedSDWA; }
  ArrayRef<unsigned> getMatchedVariants() const;

  std::unique_ptr<AMDGPUOperand> parseRegister();
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  unsigned checkTargetMatchPredicate(MCInst &Inst) override;
  unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
                                      unsigned Kind) override;
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               OperandVector &Operands, MCStreamer &Out,
                               uint64_t &ErrorInfo,
                               bool MatchingInlineAsm) override;
  bool ParseDirective(AsmToken DirectiveID) override;
  OperandMatchResultTy parseOperand(OperandVector &Operands, StringRef Mnemonic);
  StringRef parseMnemonicSuffix(StringRef Name);
  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
                        SMLoc NameLoc, OperandVector &Operands) override;
  //bool ProcessInstruction(MCInst &Inst);

  OperandMatchResultTy parseIntWithPrefix(const char *Prefix, int64_t &Int);

  OperandMatchResultTy
  parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
                     AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
                     bool (*ConvertResult)(int64_t &) = nullptr);

  OperandMatchResultTy parseOperandArrayWithPrefix(
    const char *Prefix,
    OperandVector &Operands,
    AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone,
    bool (*ConvertResult)(int64_t&) = nullptr);

  OperandMatchResultTy
  parseNamedBit(const char *Name, OperandVector &Operands,
                AMDGPUOperand::ImmTy ImmTy = AMDGPUOperand::ImmTyNone);
  OperandMatchResultTy parseStringWithPrefix(StringRef Prefix,
                                             StringRef &Value);

  bool parseAbsoluteExpr(int64_t &Val, bool AbsMod = false);
  OperandMatchResultTy parseImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseReg(OperandVector &Operands);
  OperandMatchResultTy parseRegOrImm(OperandVector &Operands, bool AbsMod = false);
  OperandMatchResultTy parseRegOrImmWithFPInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegOrImmWithIntInputMods(OperandVector &Operands, bool AllowImm = true);
  OperandMatchResultTy parseRegWithFPInputMods(OperandVector &Operands);
  OperandMatchResultTy parseRegWithIntInputMods(OperandVector &Operands);
  OperandMatchResultTy parseVReg32OrOff(OperandVector &Operands);

  void cvtDSOffset01(MCInst &Inst, const OperandVector &Operands);
  void cvtDS(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, false); }
  void cvtDSGds(MCInst &Inst, const OperandVector &Operands) { cvtDSImpl(Inst, Operands, true); }
  void cvtExp(MCInst &Inst, const OperandVector &Operands);

  bool parseCnt(int64_t &IntVal);
  OperandMatchResultTy parseSWaitCntOps(OperandVector &Operands);
  OperandMatchResultTy parseHwreg(OperandVector &Operands);

private:
  struct OperandInfoTy {
    int64_t Id;
    bool IsSymbolic = false;

    OperandInfoTy(int64_t Id_) : Id(Id_) {}
  };

  bool parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId);
  bool parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset, int64_t &Width);

  void errorExpTgt();
  OperandMatchResultTy parseExpTgtImpl(StringRef Str, uint8_t &Val);

  bool validateInstruction(const MCInst &Inst, const SMLoc &IDLoc);
  bool validateConstantBusLimitations(const MCInst &Inst);
  bool validateEarlyClobberLimitations(const MCInst &Inst);
  bool validateIntClampSupported(const MCInst &Inst);
  bool validateMIMGAtomicDMask(const MCInst &Inst);
  bool validateMIMGGatherDMask(const MCInst &Inst);
  bool validateMIMGDataSize(const MCInst &Inst);
  bool validateMIMGR128(const MCInst &Inst);
  bool validateMIMGD16(const MCInst &Inst);
  bool usesConstantBus(const MCInst &Inst, unsigned OpIdx);
  bool isInlineConstant(const MCInst &Inst, unsigned OpIdx) const;
  unsigned findImplicitSGPRReadInVOP(const MCInst &Inst) const;

  bool trySkipId(const StringRef Id);
  bool trySkipToken(const AsmToken::TokenKind Kind);
  bool skipToken(const AsmToken::TokenKind Kind, const StringRef ErrMsg);
  bool parseString(StringRef &Val, const StringRef ErrMsg = "expected a string");
  bool parseExpr(int64_t &Imm);

public:
  OperandMatchResultTy parseOptionalOperand(OperandVector &Operands);
  OperandMatchResultTy parseOptionalOpr(OperandVector &Operands);

  OperandMatchResultTy parseExpTgt(OperandVector &Operands);
  OperandMatchResultTy parseSendMsgOp(OperandVector &Operands);
  OperandMatchResultTy parseInterpSlot(OperandVector &Operands);
  OperandMatchResultTy parseInterpAttr(OperandVector &Operands);
  OperandMatchResultTy parseSOppBrTarget(OperandVector &Operands);

  bool parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                            const unsigned MinVal,
                            const unsigned MaxVal,
                            const StringRef ErrMsg);
  OperandMatchResultTy parseSwizzleOp(OperandVector &Operands);
  bool parseSwizzleOffset(int64_t &Imm);
  bool parseSwizzleMacro(int64_t &Imm);
  bool parseSwizzleQuadPerm(int64_t &Imm);
  bool parseSwizzleBitmaskPerm(int64_t &Imm);
  bool parseSwizzleBroadcast(int64_t &Imm);
  bool parseSwizzleSwap(int64_t &Imm);
  bool parseSwizzleReverse(int64_t &Imm);
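
  // Example ds_swizzle_b32 offset forms these helpers accept (macro spellings
  // assumed from the usual AMDGPU assembler syntax):
  //   offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
  //   offset:swizzle(BROADCAST, 8, 0)
  //   offset:swizzle(REVERSE, 32)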

  void cvtMubuf(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false); }
  void cvtMubufAtomic(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, false); }
  void cvtMubufAtomicReturn(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, true, true); }
  void cvtMubufLds(MCInst &Inst, const OperandVector &Operands) { cvtMubufImpl(Inst, Operands, false, false, true); }
  void cvtMtbuf(MCInst &Inst, const OperandVector &Operands);

  AMDGPUOperand::Ptr defaultGLC() const;
  AMDGPUOperand::Ptr defaultSLC() const;

  AMDGPUOperand::Ptr defaultSMRDOffset8() const;
  AMDGPUOperand::Ptr defaultSMRDOffset20() const;
  AMDGPUOperand::Ptr defaultSMRDLiteralOffset() const;
  AMDGPUOperand::Ptr defaultOffsetU12() const;
  AMDGPUOperand::Ptr defaultOffsetS13() const;

  OperandMatchResultTy parseOModOperand(OperandVector &Operands);

  void cvtVOP3(MCInst &Inst, const OperandVector &Operands,
               OptionalImmIndexMap &OptionalIdx);
  void cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3(MCInst &Inst, const OperandVector &Operands);
  void cvtVOP3P(MCInst &Inst, const OperandVector &Operands);

  void cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands);

  void cvtMIMG(MCInst &Inst, const OperandVector &Operands,
               bool IsAtomic = false);
  void cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseDPPCtrl(OperandVector &Operands);
  AMDGPUOperand::Ptr defaultRowMask() const;
  AMDGPUOperand::Ptr defaultBankMask() const;
  AMDGPUOperand::Ptr defaultBoundCtrl() const;
  void cvtDPP(MCInst &Inst, const OperandVector &Operands);

  OperandMatchResultTy parseSDWASel(OperandVector &Operands, StringRef Prefix,
                                    AMDGPUOperand::ImmTy Type);
  OperandMatchResultTy parseSDWADstUnused(OperandVector &Operands);
  void cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands);
  void cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands);
  void cvtSDWA(MCInst &Inst, const OperandVector &Operands,
               uint64_t BasicInstType, bool skipVcc = false);
};

struct OptionalOperand {
  const char *Name;
  AMDGPUOperand::ImmTy Type;
  bool IsBit;
  bool (*ConvertResult)(int64_t&);
};

} // end anonymous namespace

// May be called with integer type with equivalent bitwidth.
static const fltSemantics *getFltSemantics(unsigned Size) {
  switch (Size) {
  case 4:
    return &APFloat::IEEEsingle();
  case 8:
    return &APFloat::IEEEdouble();
  case 2:
    return &APFloat::IEEEhalf();
  default:
    llvm_unreachable("unsupported fp type");
  }
}
1198
Matt Arsenault4bd72362016-12-10 00:39:12 +00001199static const fltSemantics *getFltSemantics(MVT VT) {
1200 return getFltSemantics(VT.getSizeInBits() / 8);
1201}
1202
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001203static const fltSemantics *getOpFltSemantics(uint8_t OperandType) {
1204 switch (OperandType) {
1205 case AMDGPU::OPERAND_REG_IMM_INT32:
1206 case AMDGPU::OPERAND_REG_IMM_FP32:
1207 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1208 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1209 return &APFloat::IEEEsingle();
1210 case AMDGPU::OPERAND_REG_IMM_INT64:
1211 case AMDGPU::OPERAND_REG_IMM_FP64:
1212 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
1213 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
1214 return &APFloat::IEEEdouble();
1215 case AMDGPU::OPERAND_REG_IMM_INT16:
1216 case AMDGPU::OPERAND_REG_IMM_FP16:
1217 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1218 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1219 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1220 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16:
1221 return &APFloat::IEEEhalf();
1222 default:
1223 llvm_unreachable("unsupported fp type");
1224 }
1225}
1226
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001227//===----------------------------------------------------------------------===//
1228// Operand
1229//===----------------------------------------------------------------------===//
1230
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001231static bool canLosslesslyConvertToFPType(APFloat &FPLiteral, MVT VT) {
1232 bool Lost;
1233
1234 // Convert literal to single precision
1235 APFloat::opStatus Status = FPLiteral.convert(*getFltSemantics(VT),
1236 APFloat::rmNearestTiesToEven,
1237 &Lost);
1238  // We allow precision loss but not overflow or underflow
1239 if (Status != APFloat::opOK &&
1240 Lost &&
1241 ((Status & APFloat::opOverflow) != 0 ||
1242 (Status & APFloat::opUnderflow) != 0)) {
1243 return false;
1244 }
1245
1246 return true;
1247}
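
// For example, given the checks above: converting 1.0 to f16 is exact
// (opOK); converting 0.1 to f16 merely loses precision and is still
// accepted; converting 1.0e10 to f16 overflows (f16 max is ~65504.0)
// and is rejected.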
1248
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001249bool AMDGPUOperand::isInlinableImm(MVT type) const {
1250 if (!isImmTy(ImmTyNone)) {
1251 // Only plain immediates are inlinable (e.g. "clamp" attribute is not)
1252 return false;
1253 }
1254 // TODO: We should avoid using host float here. It would be better to
1255 // check the float bit values which is what a few other places do.
1256 // We've had bot failures before due to weird NaN support on mips hosts.
1257
1258 APInt Literal(64, Imm.Val);
1259
1260 if (Imm.IsFPImm) { // We got fp literal token
1261 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
Matt Arsenault26faed32016-12-05 22:26:17 +00001262 return AMDGPU::isInlinableLiteral64(Imm.Val,
1263 AsmParser->hasInv2PiInlineImm());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001264 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001265
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001266 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001267 if (!canLosslesslyConvertToFPType(FPLiteral, type))
1268 return false;
1269
Sam Kolton9dffada2017-01-17 15:26:02 +00001270 if (type.getScalarSizeInBits() == 16) {
1271 return AMDGPU::isInlinableLiteral16(
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001272 static_cast<int16_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
Sam Kolton9dffada2017-01-17 15:26:02 +00001273 AsmParser->hasInv2PiInlineImm());
1274 }
1275
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001276 // Check if single precision literal is inlinable
1277 return AMDGPU::isInlinableLiteral32(
1278 static_cast<int32_t>(FPLiteral.bitcastToAPInt().getZExtValue()),
Matt Arsenault26faed32016-12-05 22:26:17 +00001279 AsmParser->hasInv2PiInlineImm());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001280 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001281
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001282 // We got int literal token.
1283 if (type == MVT::f64 || type == MVT::i64) { // Expected 64-bit operand
Matt Arsenault26faed32016-12-05 22:26:17 +00001284 return AMDGPU::isInlinableLiteral64(Imm.Val,
1285 AsmParser->hasInv2PiInlineImm());
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001286 }
1287
Matt Arsenault4bd72362016-12-10 00:39:12 +00001288 if (type.getScalarSizeInBits() == 16) {
1289 return AMDGPU::isInlinableLiteral16(
1290 static_cast<int16_t>(Literal.getLoBits(16).getSExtValue()),
1291 AsmParser->hasInv2PiInlineImm());
1292 }
1293
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001294 return AMDGPU::isInlinableLiteral32(
1295 static_cast<int32_t>(Literal.getLoBits(32).getZExtValue()),
Matt Arsenault26faed32016-12-05 22:26:17 +00001296 AsmParser->hasInv2PiInlineImm());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001297}
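
// For example (assuming the standard GCN inline constants): integers in
// [-16, 64] and fp values such as 0.5, -1.0, 2.0 and 4.0 are inlinable:
//   v_add_f32 v0, 0.5, v1    // inline constant, no literal dword needed
//   v_add_f32 v0, 0.1234, v1 // not inlinable, requires a 32-bit literal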
1298
1299bool AMDGPUOperand::isLiteralImm(MVT type) const {
Hiroshi Inoue7f46baf2017-07-16 08:11:56 +00001300  // Check that this immediate can be added as a literal
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001301 if (!isImmTy(ImmTyNone)) {
1302 return false;
1303 }
1304
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001305 if (!Imm.IsFPImm) {
1306 // We got int literal token.
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001307
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001308 if (type == MVT::f64 && hasFPModifiers()) {
1309    // fp modifiers cannot be applied to int literals while preserving the
1310    // same semantics for VOP1/2/C and VOP3 because of integer truncation.
1311    // To avoid ambiguity, disable these cases.
1312 return false;
1313 }
1314
Matt Arsenault4bd72362016-12-10 00:39:12 +00001315 unsigned Size = type.getSizeInBits();
1316 if (Size == 64)
1317 Size = 32;
1318
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001319 // FIXME: 64-bit operands can zero extend, sign extend, or pad zeroes for FP
1320 // types.
Matt Arsenault4bd72362016-12-10 00:39:12 +00001321 return isUIntN(Size, Imm.Val) || isIntN(Size, Imm.Val);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001322 }
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001323
1324 // We got fp literal token
1325 if (type == MVT::f64) { // Expected 64-bit fp operand
1326 // We would set low 64-bits of literal to zeroes but we accept this literals
1327 return true;
1328 }
1329
1330 if (type == MVT::i64) { // Expected 64-bit int operand
1331 // We don't allow fp literals in 64-bit integer instructions. It is
1332 // unclear how we should encode them.
1333 return false;
1334 }
1335
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001336 APFloat FPLiteral(APFloat::IEEEdouble(), APInt(64, Imm.Val));
Matt Arsenaultc7f28a52016-12-05 22:07:21 +00001337 return canLosslesslyConvertToFPType(FPLiteral, type);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001338}
1339
1340bool AMDGPUOperand::isRegClass(unsigned RCID) const {
Sam Kolton9772eb32017-01-11 11:46:30 +00001341 return isRegKind() && AsmParser->getMRI()->getRegClass(RCID).contains(getReg());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001342}
1343
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001344bool AMDGPUOperand::isSDWAOperand(MVT type) const {
Sam Kolton549c89d2017-06-21 08:53:38 +00001345 if (AsmParser->isVI())
1346 return isVReg();
1347 else if (AsmParser->isGFX9())
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001348 return isRegKind() || isInlinableImm(type);
Sam Kolton549c89d2017-06-21 08:53:38 +00001349 else
1350 return false;
1351}
1352
Dmitry Preobrazhensky6b65f7c2018-01-17 14:00:48 +00001353bool AMDGPUOperand::isSDWAFP16Operand() const {
1354 return isSDWAOperand(MVT::f16);
1355}
1356
1357bool AMDGPUOperand::isSDWAFP32Operand() const {
1358 return isSDWAOperand(MVT::f32);
1359}
1360
1361bool AMDGPUOperand::isSDWAInt16Operand() const {
1362 return isSDWAOperand(MVT::i16);
1363}
1364
1365bool AMDGPUOperand::isSDWAInt32Operand() const {
1366 return isSDWAOperand(MVT::i32);
1367}
1368
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001369uint64_t AMDGPUOperand::applyInputFPModifiers(uint64_t Val, unsigned Size) const
1370{
1371 assert(isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
1372 assert(Size == 2 || Size == 4 || Size == 8);
1373
1374 const uint64_t FpSignMask = (1ULL << (Size * 8 - 1));
1375
1376 if (Imm.Mods.Abs) {
1377 Val &= ~FpSignMask;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001378 }
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001379 if (Imm.Mods.Neg) {
1380 Val ^= FpSignMask;
1381 }
1382
1383 return Val;
1384}
1385
1386void AMDGPUOperand::addImmOperands(MCInst &Inst, unsigned N, bool ApplyModifiers) const {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001387 if (AMDGPU::isSISrcOperand(AsmParser->getMII()->get(Inst.getOpcode()),
1388 Inst.getNumOperands())) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001389 addLiteralImmOperand(Inst, Imm.Val,
1390 ApplyModifiers &
1391 isImmTy(ImmTyNone) && Imm.Mods.hasFPModifiers());
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001392 } else {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001393 assert(!isImmTy(ImmTyNone) || !hasModifiers());
1394 Inst.addOperand(MCOperand::createImm(Imm.Val));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001395 }
1396}
1397
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001398void AMDGPUOperand::addLiteralImmOperand(MCInst &Inst, int64_t Val, bool ApplyModifiers) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001399 const auto& InstDesc = AsmParser->getMII()->get(Inst.getOpcode());
1400 auto OpNum = Inst.getNumOperands();
1401 // Check that this operand accepts literals
1402 assert(AMDGPU::isSISrcOperand(InstDesc, OpNum));
1403
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001404 if (ApplyModifiers) {
1405 assert(AMDGPU::isSISrcFPOperand(InstDesc, OpNum));
1406 const unsigned Size = Imm.IsFPImm ? sizeof(double) : getOperandSize(InstDesc, OpNum);
1407 Val = applyInputFPModifiers(Val, Size);
1408 }
1409
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001410 APInt Literal(64, Val);
1411 uint8_t OpTy = InstDesc.OpInfo[OpNum].OperandType;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001412
1413 if (Imm.IsFPImm) { // We got fp literal token
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001414 switch (OpTy) {
1415 case AMDGPU::OPERAND_REG_IMM_INT64:
1416 case AMDGPU::OPERAND_REG_IMM_FP64:
1417 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001418 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
Matt Arsenault26faed32016-12-05 22:26:17 +00001419 if (AMDGPU::isInlinableLiteral64(Literal.getZExtValue(),
1420 AsmParser->hasInv2PiInlineImm())) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001421 Inst.addOperand(MCOperand::createImm(Literal.getZExtValue()));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001422 return;
1423 }
1424
1425 // Non-inlineable
1426 if (AMDGPU::isSISrcFPOperand(InstDesc, OpNum)) { // Expected 64-bit fp operand
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001427 // For fp operands we check if low 32 bits are zeros
1428 if (Literal.getLoBits(32) != 0) {
1429 const_cast<AMDGPUAsmParser *>(AsmParser)->Warning(Inst.getLoc(),
Matt Arsenault4bd72362016-12-10 00:39:12 +00001430 "Can't encode literal as exact 64-bit floating-point operand. "
1431 "Low 32-bits will be set to zero");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001432 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001433
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001434 Inst.addOperand(MCOperand::createImm(Literal.lshr(32).getZExtValue()));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001435 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001436 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001437
1438 // We don't allow fp literals in 64-bit integer instructions. It is
1439 // unclear how we should encode them. This case should be checked earlier
1440 // in predicate methods (isLiteralImm())
1441 llvm_unreachable("fp literal in 64-bit integer instruction.");
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001442
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001443 case AMDGPU::OPERAND_REG_IMM_INT32:
1444 case AMDGPU::OPERAND_REG_IMM_FP32:
1445 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
1446 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
1447 case AMDGPU::OPERAND_REG_IMM_INT16:
1448 case AMDGPU::OPERAND_REG_IMM_FP16:
1449 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
1450 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
1451 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1452 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001453 bool lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001454 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001455 // Convert literal to single precision
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001456 FPLiteral.convert(*getOpFltSemantics(OpTy),
Matt Arsenault4bd72362016-12-10 00:39:12 +00001457 APFloat::rmNearestTiesToEven, &lost);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001458      // We allow precision loss but not overflow or underflow. This should be
1459 // checked earlier in isLiteralImm()
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001460
1461 uint64_t ImmVal = FPLiteral.bitcastToAPInt().getZExtValue();
1462 if (OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
1463 OpTy == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
1464 ImmVal |= (ImmVal << 16);
1465 }
1466
1467 Inst.addOperand(MCOperand::createImm(ImmVal));
Matt Arsenault4bd72362016-12-10 00:39:12 +00001468 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001469 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001470 default:
1471 llvm_unreachable("invalid operand size");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001472 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001473
1474 return;
1475 }
1476
1477 // We got int literal token.
1478 // Only sign extend inline immediates.
1479 // FIXME: No errors on truncation
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001480 switch (OpTy) {
1481 case AMDGPU::OPERAND_REG_IMM_INT32:
1482 case AMDGPU::OPERAND_REG_IMM_FP32:
1483 case AMDGPU::OPERAND_REG_INLINE_C_INT32:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001484 case AMDGPU::OPERAND_REG_INLINE_C_FP32:
Matt Arsenault4bd72362016-12-10 00:39:12 +00001485 if (isInt<32>(Val) &&
1486 AMDGPU::isInlinableLiteral32(static_cast<int32_t>(Val),
1487 AsmParser->hasInv2PiInlineImm())) {
1488 Inst.addOperand(MCOperand::createImm(Val));
1489 return;
1490 }
1491
1492 Inst.addOperand(MCOperand::createImm(Val & 0xffffffff));
1493 return;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001494
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001495 case AMDGPU::OPERAND_REG_IMM_INT64:
1496 case AMDGPU::OPERAND_REG_IMM_FP64:
1497 case AMDGPU::OPERAND_REG_INLINE_C_INT64:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001498 case AMDGPU::OPERAND_REG_INLINE_C_FP64:
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001499 if (AMDGPU::isInlinableLiteral64(Val, AsmParser->hasInv2PiInlineImm())) {
Matt Arsenault4bd72362016-12-10 00:39:12 +00001500 Inst.addOperand(MCOperand::createImm(Val));
1501 return;
1502 }
1503
1504 Inst.addOperand(MCOperand::createImm(Lo_32(Val)));
1505 return;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001506
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001507 case AMDGPU::OPERAND_REG_IMM_INT16:
1508 case AMDGPU::OPERAND_REG_IMM_FP16:
1509 case AMDGPU::OPERAND_REG_INLINE_C_INT16:
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001510 case AMDGPU::OPERAND_REG_INLINE_C_FP16:
Matt Arsenault4bd72362016-12-10 00:39:12 +00001511 if (isInt<16>(Val) &&
1512 AMDGPU::isInlinableLiteral16(static_cast<int16_t>(Val),
1513 AsmParser->hasInv2PiInlineImm())) {
1514 Inst.addOperand(MCOperand::createImm(Val));
1515 return;
1516 }
1517
1518 Inst.addOperand(MCOperand::createImm(Val & 0xffff));
1519 return;
Eugene Zelenkoc8fbf6f2017-08-10 00:46:15 +00001520
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001521 case AMDGPU::OPERAND_REG_INLINE_C_V2INT16:
1522 case AMDGPU::OPERAND_REG_INLINE_C_V2FP16: {
1523 auto LiteralVal = static_cast<uint16_t>(Literal.getLoBits(16).getZExtValue());
1524 assert(AMDGPU::isInlinableLiteral16(LiteralVal,
1525 AsmParser->hasInv2PiInlineImm()));
Eugene Zelenko66203762017-01-21 00:53:49 +00001526
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00001527 uint32_t ImmVal = static_cast<uint32_t>(LiteralVal) << 16 |
1528 static_cast<uint32_t>(LiteralVal);
1529 Inst.addOperand(MCOperand::createImm(ImmVal));
1530 return;
1531 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001532 default:
1533 llvm_unreachable("invalid operand size");
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001534 }
1535}
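
// For example, in "v_fract_f64 v[0:1], 1.2345" the src value 1.2345
// (0x3FF3C083126E978D as an IEEE double) is not inlinable; only the high
// 32 bits (0x3FF3C083) fit into the 32-bit literal, so the low 32 bits
// are dropped with the warning issued in the fp64 case above.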
1536
Matt Arsenault4bd72362016-12-10 00:39:12 +00001537template <unsigned Bitwidth>
1538void AMDGPUOperand::addKImmFPOperands(MCInst &Inst, unsigned N) const {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001539 APInt Literal(64, Imm.Val);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001540
1541 if (!Imm.IsFPImm) {
1542 // We got int literal token.
1543 Inst.addOperand(MCOperand::createImm(Literal.getLoBits(Bitwidth).getZExtValue()));
1544 return;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001545 }
Matt Arsenault4bd72362016-12-10 00:39:12 +00001546
1547 bool Lost;
Stephan Bergmann17c7f702016-12-14 11:57:17 +00001548 APFloat FPLiteral(APFloat::IEEEdouble(), Literal);
Matt Arsenault4bd72362016-12-10 00:39:12 +00001549 FPLiteral.convert(*getFltSemantics(Bitwidth / 8),
1550 APFloat::rmNearestTiesToEven, &Lost);
1551 Inst.addOperand(MCOperand::createImm(FPLiteral.bitcastToAPInt().getZExtValue()));
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001552}
1553
1554void AMDGPUOperand::addRegOperands(MCInst &Inst, unsigned N) const {
1555 Inst.addOperand(MCOperand::createReg(AMDGPU::getMCReg(getReg(), AsmParser->getSTI())));
1556}
1557
1558//===----------------------------------------------------------------------===//
1559// AsmParser
1560//===----------------------------------------------------------------------===//
1561
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001562static int getRegClass(RegisterKind Is, unsigned RegWidth) {
1563 if (Is == IS_VGPR) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001564 switch (RegWidth) {
Matt Arsenault967c2f52015-11-03 22:50:32 +00001565 default: return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001566 case 1: return AMDGPU::VGPR_32RegClassID;
1567 case 2: return AMDGPU::VReg_64RegClassID;
1568 case 3: return AMDGPU::VReg_96RegClassID;
1569 case 4: return AMDGPU::VReg_128RegClassID;
1570 case 8: return AMDGPU::VReg_256RegClassID;
1571 case 16: return AMDGPU::VReg_512RegClassID;
1572 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001573 } else if (Is == IS_TTMP) {
1574 switch (RegWidth) {
1575 default: return -1;
1576 case 1: return AMDGPU::TTMP_32RegClassID;
1577 case 2: return AMDGPU::TTMP_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001578 case 4: return AMDGPU::TTMP_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001579 case 8: return AMDGPU::TTMP_256RegClassID;
1580 case 16: return AMDGPU::TTMP_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001581 }
1582 } else if (Is == IS_SGPR) {
1583 switch (RegWidth) {
1584 default: return -1;
1585 case 1: return AMDGPU::SGPR_32RegClassID;
1586 case 2: return AMDGPU::SGPR_64RegClassID;
Artem Tamazov38e496b2016-04-29 17:04:50 +00001587 case 4: return AMDGPU::SGPR_128RegClassID;
Dmitry Preobrazhensky27134952017-12-22 15:18:06 +00001588 case 8: return AMDGPU::SGPR_256RegClassID;
1589 case 16: return AMDGPU::SGPR_512RegClassID;
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001590 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00001591 }
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001592 return -1;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001593}
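
// For example, IS_VGPR with RegWidth == 2 (e.g. v[4:5]) maps to
// VReg_64RegClassID, and IS_SGPR with RegWidth == 16 (e.g. s[0:15]) maps
// to SGPR_512RegClassID.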
1594
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001595static unsigned getSpecialRegForName(StringRef RegName) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00001596 return StringSwitch<unsigned>(RegName)
1597 .Case("exec", AMDGPU::EXEC)
1598 .Case("vcc", AMDGPU::VCC)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001599 .Case("flat_scratch", AMDGPU::FLAT_SCR)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001600 .Case("xnack_mask", AMDGPU::XNACK_MASK)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001601 .Case("m0", AMDGPU::M0)
1602 .Case("scc", AMDGPU::SCC)
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001603 .Case("tba", AMDGPU::TBA)
1604 .Case("tma", AMDGPU::TMA)
Matt Arsenaultaac9b492015-11-03 22:50:34 +00001605 .Case("flat_scratch_lo", AMDGPU::FLAT_SCR_LO)
1606 .Case("flat_scratch_hi", AMDGPU::FLAT_SCR_HI)
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001607 .Case("xnack_mask_lo", AMDGPU::XNACK_MASK_LO)
1608 .Case("xnack_mask_hi", AMDGPU::XNACK_MASK_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001609 .Case("vcc_lo", AMDGPU::VCC_LO)
1610 .Case("vcc_hi", AMDGPU::VCC_HI)
1611 .Case("exec_lo", AMDGPU::EXEC_LO)
1612 .Case("exec_hi", AMDGPU::EXEC_HI)
Artem Tamazoveb4d5a92016-04-13 16:18:41 +00001613 .Case("tma_lo", AMDGPU::TMA_LO)
1614 .Case("tma_hi", AMDGPU::TMA_HI)
1615 .Case("tba_lo", AMDGPU::TBA_LO)
1616 .Case("tba_hi", AMDGPU::TBA_HI)
Tom Stellard45bb48e2015-06-13 03:28:10 +00001617 .Default(0);
1618}
1619
Eugene Zelenko66203762017-01-21 00:53:49 +00001620bool AMDGPUAsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
1621 SMLoc &EndLoc) {
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001622 auto R = parseRegister();
1623 if (!R) return true;
1624 assert(R->isReg());
1625 RegNo = R->getReg();
1626 StartLoc = R->getStartLoc();
1627 EndLoc = R->getEndLoc();
1628 return false;
1629}
1630
Eugene Zelenko66203762017-01-21 00:53:49 +00001631bool AMDGPUAsmParser::AddNextRegisterToList(unsigned &Reg, unsigned &RegWidth,
1632 RegisterKind RegKind, unsigned Reg1,
1633 unsigned RegNum) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001634 switch (RegKind) {
1635 case IS_SPECIAL:
Eugene Zelenko66203762017-01-21 00:53:49 +00001636 if (Reg == AMDGPU::EXEC_LO && Reg1 == AMDGPU::EXEC_HI) {
1637 Reg = AMDGPU::EXEC;
1638 RegWidth = 2;
1639 return true;
1640 }
1641 if (Reg == AMDGPU::FLAT_SCR_LO && Reg1 == AMDGPU::FLAT_SCR_HI) {
1642 Reg = AMDGPU::FLAT_SCR;
1643 RegWidth = 2;
1644 return true;
1645 }
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00001646 if (Reg == AMDGPU::XNACK_MASK_LO && Reg1 == AMDGPU::XNACK_MASK_HI) {
1647 Reg = AMDGPU::XNACK_MASK;
1648 RegWidth = 2;
1649 return true;
1650 }
Eugene Zelenko66203762017-01-21 00:53:49 +00001651 if (Reg == AMDGPU::VCC_LO && Reg1 == AMDGPU::VCC_HI) {
1652 Reg = AMDGPU::VCC;
1653 RegWidth = 2;
1654 return true;
1655 }
1656 if (Reg == AMDGPU::TBA_LO && Reg1 == AMDGPU::TBA_HI) {
1657 Reg = AMDGPU::TBA;
1658 RegWidth = 2;
1659 return true;
1660 }
1661 if (Reg == AMDGPU::TMA_LO && Reg1 == AMDGPU::TMA_HI) {
1662 Reg = AMDGPU::TMA;
1663 RegWidth = 2;
1664 return true;
1665 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001666 return false;
1667 case IS_VGPR:
1668 case IS_SGPR:
1669 case IS_TTMP:
Eugene Zelenko66203762017-01-21 00:53:49 +00001670 if (Reg1 != Reg + RegWidth) {
1671 return false;
1672 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001673 RegWidth++;
1674 return true;
1675 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001676 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001677 }
1678}
1679
Eugene Zelenko66203762017-01-21 00:53:49 +00001680bool AMDGPUAsmParser::ParseAMDGPURegister(RegisterKind &RegKind, unsigned &Reg,
1681 unsigned &RegNum, unsigned &RegWidth,
1682 unsigned *DwordRegIndex) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001683 if (DwordRegIndex) { *DwordRegIndex = 0; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001684 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
1685 if (getLexer().is(AsmToken::Identifier)) {
1686 StringRef RegName = Parser.getTok().getString();
1687 if ((Reg = getSpecialRegForName(RegName))) {
1688 Parser.Lex();
1689 RegKind = IS_SPECIAL;
1690 } else {
1691 unsigned RegNumIndex = 0;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001692 if (RegName[0] == 'v') {
1693 RegNumIndex = 1;
1694 RegKind = IS_VGPR;
1695 } else if (RegName[0] == 's') {
1696 RegNumIndex = 1;
1697 RegKind = IS_SGPR;
1698 } else if (RegName.startswith("ttmp")) {
1699 RegNumIndex = strlen("ttmp");
1700 RegKind = IS_TTMP;
1701 } else {
1702 return false;
1703 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001704 if (RegName.size() > RegNumIndex) {
1705 // Single 32-bit register: vXX.
Artem Tamazovf88397c2016-06-03 14:41:17 +00001706 if (RegName.substr(RegNumIndex).getAsInteger(10, RegNum))
1707 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001708 Parser.Lex();
1709 RegWidth = 1;
1710 } else {
Artem Tamazov7da9b822016-05-27 12:50:13 +00001711 // Range of registers: v[XX:YY]. ":YY" is optional.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001712 Parser.Lex();
1713 int64_t RegLo, RegHi;
Artem Tamazovf88397c2016-06-03 14:41:17 +00001714 if (getLexer().isNot(AsmToken::LBrac))
1715 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001716 Parser.Lex();
1717
Artem Tamazovf88397c2016-06-03 14:41:17 +00001718 if (getParser().parseAbsoluteExpression(RegLo))
1719 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001720
Artem Tamazov7da9b822016-05-27 12:50:13 +00001721 const bool isRBrace = getLexer().is(AsmToken::RBrac);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001722 if (!isRBrace && getLexer().isNot(AsmToken::Colon))
1723 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001724 Parser.Lex();
1725
Artem Tamazov7da9b822016-05-27 12:50:13 +00001726 if (isRBrace) {
1727 RegHi = RegLo;
1728 } else {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001729 if (getParser().parseAbsoluteExpression(RegHi))
1730 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001731
Artem Tamazovf88397c2016-06-03 14:41:17 +00001732 if (getLexer().isNot(AsmToken::RBrac))
1733 return false;
Artem Tamazov7da9b822016-05-27 12:50:13 +00001734 Parser.Lex();
1735 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001736 RegNum = (unsigned) RegLo;
1737 RegWidth = (RegHi - RegLo) + 1;
1738 }
1739 }
1740 } else if (getLexer().is(AsmToken::LBrac)) {
1741 // List of consecutive registers: [s0,s1,s2,s3]
1742 Parser.Lex();
Artem Tamazova01cce82016-12-27 16:00:11 +00001743 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, nullptr))
Artem Tamazovf88397c2016-06-03 14:41:17 +00001744 return false;
1745 if (RegWidth != 1)
1746 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001747 RegisterKind RegKind1;
1748 unsigned Reg1, RegNum1, RegWidth1;
1749 do {
1750 if (getLexer().is(AsmToken::Comma)) {
1751 Parser.Lex();
1752 } else if (getLexer().is(AsmToken::RBrac)) {
1753 Parser.Lex();
1754 break;
Artem Tamazova01cce82016-12-27 16:00:11 +00001755 } else if (ParseAMDGPURegister(RegKind1, Reg1, RegNum1, RegWidth1, nullptr)) {
Artem Tamazovf88397c2016-06-03 14:41:17 +00001756 if (RegWidth1 != 1) {
1757 return false;
1758 }
1759 if (RegKind1 != RegKind) {
1760 return false;
1761 }
1762 if (!AddNextRegisterToList(Reg, RegWidth, RegKind1, Reg1, RegNum1)) {
1763 return false;
1764 }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001765 } else {
1766 return false;
1767 }
1768 } while (true);
1769 } else {
1770 return false;
1771 }
1772 switch (RegKind) {
1773 case IS_SPECIAL:
1774 RegNum = 0;
1775 RegWidth = 1;
1776 break;
1777 case IS_VGPR:
1778 case IS_SGPR:
1779 case IS_TTMP:
1780 {
1781 unsigned Size = 1;
1782 if (RegKind == IS_SGPR || RegKind == IS_TTMP) {
Artem Tamazova01cce82016-12-27 16:00:11 +00001783 // SGPR and TTMP registers must be aligned. Max required alignment is 4 dwords.
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001784 Size = std::min(RegWidth, 4u);
1785 }
Artem Tamazovf88397c2016-06-03 14:41:17 +00001786 if (RegNum % Size != 0)
1787 return false;
Artem Tamazova01cce82016-12-27 16:00:11 +00001788 if (DwordRegIndex) { *DwordRegIndex = RegNum; }
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001789 RegNum = RegNum / Size;
1790 int RCID = getRegClass(RegKind, RegWidth);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001791 if (RCID == -1)
1792 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001793 const MCRegisterClass RC = TRI->getRegClass(RCID);
Artem Tamazovf88397c2016-06-03 14:41:17 +00001794 if (RegNum >= RC.getNumRegs())
1795 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001796 Reg = RC.getRegister(RegNum);
1797 break;
1798 }
1799
1800 default:
Matt Arsenault92b355b2016-11-15 19:34:37 +00001801 llvm_unreachable("unexpected register kind");
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001802 }
1803
Artem Tamazovf88397c2016-06-03 14:41:17 +00001804 if (!subtargetHasRegister(*TRI, Reg))
1805 return false;
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001806 return true;
1807}
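
// Examples of register operand syntax accepted above:
//   v0, s101, ttmp4            // single registers
//   v[4:7], s[2:3], ttmp[4:7]  // register ranges
//   v[8]                       // a range of one register
//   [v4,v5,v6,v7]              // list of consecutive registers
//   vcc, exec_lo, flat_scratch // special registers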
1808
Scott Linder1e8c2c72018-06-21 19:38:56 +00001809Optional<StringRef>
1810AMDGPUAsmParser::getGprCountSymbolName(RegisterKind RegKind) {
1811 switch (RegKind) {
1812 case IS_VGPR:
1813 return StringRef(".amdgcn.next_free_vgpr");
1814 case IS_SGPR:
1815 return StringRef(".amdgcn.next_free_sgpr");
1816 default:
1817 return None;
1818 }
1819}
1820
1821void AMDGPUAsmParser::initializeGprCountSymbol(RegisterKind RegKind) {
1822 auto SymbolName = getGprCountSymbolName(RegKind);
1823 assert(SymbolName && "initializing invalid register kind");
1824 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1825 Sym->setVariableValue(MCConstantExpr::create(0, getContext()));
1826}
1827
1828bool AMDGPUAsmParser::updateGprCountSymbols(RegisterKind RegKind,
1829 unsigned DwordRegIndex,
1830 unsigned RegWidth) {
1831 // Symbols are only defined for GCN targets
1832 if (AMDGPU::IsaInfo::getIsaVersion(getFeatureBits()).Major < 6)
1833 return true;
1834
1835 auto SymbolName = getGprCountSymbolName(RegKind);
1836 if (!SymbolName)
1837 return true;
1838 MCSymbol *Sym = getContext().getOrCreateSymbol(*SymbolName);
1839
1840 int64_t NewMax = DwordRegIndex + RegWidth - 1;
1841 int64_t OldCount;
1842
1843 if (!Sym->isVariable())
1844 return !Error(getParser().getTok().getLoc(),
1845 ".amdgcn.next_free_{v,s}gpr symbols must be variable");
1846 if (!Sym->getVariableValue(false)->evaluateAsAbsolute(OldCount))
1847 return !Error(
1848 getParser().getTok().getLoc(),
1849 ".amdgcn.next_free_{v,s}gpr symbols must be absolute expressions");
1850
1851 if (OldCount <= NewMax)
1852 Sym->setVariableValue(MCConstantExpr::create(NewMax + 1, getContext()));
1853
1854 return true;
1855}
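
// For example, after parsing v[6:7] the NewMax computed above is 7, so
// .amdgcn.next_free_vgpr is raised to 8 unless it already exceeds that.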
1856
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001857std::unique_ptr<AMDGPUOperand> AMDGPUAsmParser::parseRegister() {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001858 const auto &Tok = Parser.getTok();
Valery Pykhtin0f97f172016-03-14 07:43:42 +00001859 SMLoc StartLoc = Tok.getLoc();
1860 SMLoc EndLoc = Tok.getEndLoc();
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001861 RegisterKind RegKind;
Artem Tamazova01cce82016-12-27 16:00:11 +00001862 unsigned Reg, RegNum, RegWidth, DwordRegIndex;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001863
Artem Tamazova01cce82016-12-27 16:00:11 +00001864 if (!ParseAMDGPURegister(RegKind, Reg, RegNum, RegWidth, &DwordRegIndex)) {
Nikolay Haustovfb5c3072016-04-20 09:34:48 +00001865 return nullptr;
Tom Stellard45bb48e2015-06-13 03:28:10 +00001866 }
Scott Linder1e8c2c72018-06-21 19:38:56 +00001867 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
1868 if (!updateGprCountSymbols(RegKind, DwordRegIndex, RegWidth))
1869 return nullptr;
1870 } else
1871 KernelScope.usesRegister(RegKind, DwordRegIndex, RegWidth);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001872 return AMDGPUOperand::CreateReg(this, Reg, StartLoc, EndLoc, false);
Tom Stellard45bb48e2015-06-13 03:28:10 +00001873}
1874
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001875bool
1876AMDGPUAsmParser::parseAbsoluteExpr(int64_t &Val, bool AbsMod) {
1877 if (AbsMod && getLexer().peekTok().is(AsmToken::Pipe) &&
1878 (getLexer().getKind() == AsmToken::Integer ||
1879 getLexer().getKind() == AsmToken::Real)) {
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001880 // This is a workaround for handling operands like these:
1881 // |1.0|
1882 // |-1|
1883    // This syntax is not compatible with the syntax of standard
1884 // MC expressions (due to the trailing '|').
1885
1886 SMLoc EndLoc;
1887 const MCExpr *Expr;
1888
1889 if (getParser().parsePrimaryExpr(Expr, EndLoc)) {
1890 return true;
1891 }
1892
1893 return !Expr->evaluateAsAbsolute(Val);
1894 }
1895
1896 return getParser().parseAbsoluteExpression(Val);
1897}
1898
Alex Bradbury58eba092016-11-01 16:32:05 +00001899OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001900AMDGPUAsmParser::parseImm(OperandVector &Operands, bool AbsMod) {
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001901 // TODO: add syntactic sugar for 1/(2*PI)
Sam Kolton1bdcef72016-05-23 09:59:02 +00001902 bool Minus = false;
1903 if (getLexer().getKind() == AsmToken::Minus) {
Dmitry Preobrazhensky471adf72017-12-22 18:03:35 +00001904 const AsmToken NextToken = getLexer().peekTok();
1905 if (!NextToken.is(AsmToken::Integer) &&
1906 !NextToken.is(AsmToken::Real)) {
1907 return MatchOperand_NoMatch;
1908 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00001909 Minus = true;
1910 Parser.Lex();
1911 }
1912
1913 SMLoc S = Parser.getTok().getLoc();
1914 switch(getLexer().getKind()) {
1915 case AsmToken::Integer: {
1916 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001917 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001918 return MatchOperand_ParseFail;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001919 if (Minus)
1920 IntVal *= -1;
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001921 Operands.push_back(AMDGPUOperand::CreateImm(this, IntVal, S));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001922 return MatchOperand_Success;
1923 }
1924 case AsmToken::Real: {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001925 int64_t IntVal;
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001926 if (parseAbsoluteExpr(IntVal, AbsMod))
Sam Kolton1bdcef72016-05-23 09:59:02 +00001927 return MatchOperand_ParseFail;
1928
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001929 APFloat F(BitsToDouble(IntVal));
Sam Kolton1bdcef72016-05-23 09:59:02 +00001930 if (Minus)
1931 F.changeSign();
1932 Operands.push_back(
Sam Kolton1eeb11b2016-09-09 14:44:04 +00001933 AMDGPUOperand::CreateImm(this, F.bitcastToAPInt().getZExtValue(), S,
Sam Kolton1bdcef72016-05-23 09:59:02 +00001934 AMDGPUOperand::ImmTyNone, true));
1935 return MatchOperand_Success;
1936 }
1937 default:
Dmitry Preobrazhensky471adf72017-12-22 18:03:35 +00001938 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001939 }
1940}
1941
Alex Bradbury58eba092016-11-01 16:32:05 +00001942OperandMatchResultTy
Sam Kolton9772eb32017-01-11 11:46:30 +00001943AMDGPUAsmParser::parseReg(OperandVector &Operands) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00001944 if (auto R = parseRegister()) {
1945 assert(R->isReg());
1946 R->Reg.IsForcedVOP3 = isForcedVOP3();
1947 Operands.push_back(std::move(R));
1948 return MatchOperand_Success;
1949 }
Sam Kolton9772eb32017-01-11 11:46:30 +00001950 return MatchOperand_NoMatch;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001951}
1952
Alex Bradbury58eba092016-11-01 16:32:05 +00001953OperandMatchResultTy
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00001954AMDGPUAsmParser::parseRegOrImm(OperandVector &Operands, bool AbsMod) {
1955 auto res = parseImm(Operands, AbsMod);
Sam Kolton9772eb32017-01-11 11:46:30 +00001956 if (res != MatchOperand_NoMatch) {
1957 return res;
1958 }
1959
1960 return parseReg(Operands);
1961}
1962
1963OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00001964AMDGPUAsmParser::parseRegOrImmWithFPInputMods(OperandVector &Operands,
1965 bool AllowImm) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001966 bool Negate = false, Negate2 = false, Abs = false, Abs2 = false;
Sam Kolton1bdcef72016-05-23 09:59:02 +00001967
1968 if (getLexer().getKind()== AsmToken::Minus) {
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001969 const AsmToken NextToken = getLexer().peekTok();
1970
1971 // Disable ambiguous constructs like '--1' etc. Should use neg(-1) instead.
1972 if (NextToken.is(AsmToken::Minus)) {
1973 Error(Parser.getTok().getLoc(), "invalid syntax, expected 'neg' modifier");
1974 return MatchOperand_ParseFail;
1975 }
1976
1977 // '-' followed by an integer literal N should be interpreted as integer
1978 // negation rather than a floating-point NEG modifier applied to N.
1979    // Besides being counter-intuitive, such use of the floating-point NEG modifier
1980 // results in different meaning of integer literals used with VOP1/2/C
1981 // and VOP3, for example:
1982 // v_exp_f32_e32 v5, -1 // VOP1: src0 = 0xFFFFFFFF
1983 // v_exp_f32_e64 v5, -1 // VOP3: src0 = 0x80000001
1984    // Negative fp literals should be handled likewise for uniformity.
1985 if (!NextToken.is(AsmToken::Integer) && !NextToken.is(AsmToken::Real)) {
1986 Parser.Lex();
1987 Negate = true;
1988 }
1989 }
1990
1991 if (getLexer().getKind() == AsmToken::Identifier &&
1992 Parser.getTok().getString() == "neg") {
1993 if (Negate) {
1994 Error(Parser.getTok().getLoc(), "expected register or immediate");
1995 return MatchOperand_ParseFail;
1996 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00001997 Parser.Lex();
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00001998 Negate2 = true;
1999 if (getLexer().isNot(AsmToken::LParen)) {
2000 Error(Parser.getTok().getLoc(), "expected left paren after neg");
2001 return MatchOperand_ParseFail;
2002 }
2003 Parser.Lex();
Sam Kolton1bdcef72016-05-23 09:59:02 +00002004 }
2005
Eugene Zelenko66203762017-01-21 00:53:49 +00002006 if (getLexer().getKind() == AsmToken::Identifier &&
2007 Parser.getTok().getString() == "abs") {
Sam Kolton1bdcef72016-05-23 09:59:02 +00002008 Parser.Lex();
2009 Abs2 = true;
2010 if (getLexer().isNot(AsmToken::LParen)) {
2011 Error(Parser.getTok().getLoc(), "expected left paren after abs");
2012 return MatchOperand_ParseFail;
2013 }
2014 Parser.Lex();
2015 }
2016
2017 if (getLexer().getKind() == AsmToken::Pipe) {
2018 if (Abs2) {
2019 Error(Parser.getTok().getLoc(), "expected register or immediate");
2020 return MatchOperand_ParseFail;
2021 }
2022 Parser.Lex();
2023 Abs = true;
2024 }
2025
Sam Kolton9772eb32017-01-11 11:46:30 +00002026 OperandMatchResultTy Res;
2027 if (AllowImm) {
Dmitry Preobrazhensky1e124e12017-03-20 16:33:20 +00002028 Res = parseRegOrImm(Operands, Abs);
Sam Kolton9772eb32017-01-11 11:46:30 +00002029 } else {
2030 Res = parseReg(Operands);
2031 }
Sam Kolton1bdcef72016-05-23 09:59:02 +00002032 if (Res != MatchOperand_Success) {
2033 return Res;
2034 }
2035
Matt Arsenaultb55f6202016-12-03 18:22:49 +00002036 AMDGPUOperand::Modifiers Mods;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002037 if (Abs) {
2038 if (getLexer().getKind() != AsmToken::Pipe) {
2039 Error(Parser.getTok().getLoc(), "expected vertical bar");
2040 return MatchOperand_ParseFail;
2041 }
2042 Parser.Lex();
Sam Kolton945231a2016-06-10 09:57:59 +00002043 Mods.Abs = true;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002044 }
2045 if (Abs2) {
2046 if (getLexer().isNot(AsmToken::RParen)) {
2047 Error(Parser.getTok().getLoc(), "expected closing parentheses");
2048 return MatchOperand_ParseFail;
2049 }
2050 Parser.Lex();
Sam Kolton945231a2016-06-10 09:57:59 +00002051 Mods.Abs = true;
Sam Kolton1bdcef72016-05-23 09:59:02 +00002052 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00002053
Dmitry Preobrazhensky40af9c32017-03-20 14:50:35 +00002054 if (Negate) {
2055 Mods.Neg = true;
2056 } else if (Negate2) {
2057 if (getLexer().isNot(AsmToken::RParen)) {
2058 Error(Parser.getTok().getLoc(), "expected closing parentheses");
2059 return MatchOperand_ParseFail;
2060 }
2061 Parser.Lex();
2062 Mods.Neg = true;
2063 }
2064
Sam Kolton945231a2016-06-10 09:57:59 +00002065 if (Mods.hasFPModifiers()) {
Sam Kolton1bdcef72016-05-23 09:59:02 +00002066 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00002067 Op.setModifiers(Mods);
Sam Kolton1bdcef72016-05-23 09:59:02 +00002068 }
2069 return MatchOperand_Success;
2070}
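
// Examples of the fp input modifiers parsed above:
//   v_exp_f32 v5, -v1     // neg, shorthand syntax
//   v_exp_f32 v5, neg(v1) // neg, named syntax
//   v_exp_f32 v5, |v1|    // abs, shorthand syntax
//   v_exp_f32 v5, abs(v1) // abs, named syntax
// Note that "v_exp_f32 v5, -1" negates the integer literal rather than
// applying the fp neg modifier, as explained above.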
2071
Alex Bradbury58eba092016-11-01 16:32:05 +00002072OperandMatchResultTy
Eugene Zelenko66203762017-01-21 00:53:49 +00002073AMDGPUAsmParser::parseRegOrImmWithIntInputMods(OperandVector &Operands,
2074 bool AllowImm) {
Sam Kolton945231a2016-06-10 09:57:59 +00002075 bool Sext = false;
2076
Eugene Zelenko66203762017-01-21 00:53:49 +00002077 if (getLexer().getKind() == AsmToken::Identifier &&
2078 Parser.getTok().getString() == "sext") {
Sam Kolton945231a2016-06-10 09:57:59 +00002079 Parser.Lex();
2080 Sext = true;
2081 if (getLexer().isNot(AsmToken::LParen)) {
2082 Error(Parser.getTok().getLoc(), "expected left paren after sext");
2083 return MatchOperand_ParseFail;
2084 }
2085 Parser.Lex();
2086 }
2087
Sam Kolton9772eb32017-01-11 11:46:30 +00002088 OperandMatchResultTy Res;
2089 if (AllowImm) {
2090 Res = parseRegOrImm(Operands);
2091 } else {
2092 Res = parseReg(Operands);
2093 }
Sam Kolton945231a2016-06-10 09:57:59 +00002094 if (Res != MatchOperand_Success) {
2095 return Res;
2096 }
2097
Matt Arsenaultb55f6202016-12-03 18:22:49 +00002098 AMDGPUOperand::Modifiers Mods;
Sam Kolton945231a2016-06-10 09:57:59 +00002099 if (Sext) {
2100 if (getLexer().isNot(AsmToken::RParen)) {
2101 Error(Parser.getTok().getLoc(), "expected closing parentheses");
2102 return MatchOperand_ParseFail;
2103 }
2104 Parser.Lex();
2105 Mods.Sext = true;
2106 }
Matt Arsenaultf3dd8632016-11-01 00:55:14 +00002107
Sam Kolton945231a2016-06-10 09:57:59 +00002108 if (Mods.hasIntModifiers()) {
Sam Koltona9cd6aa2016-07-05 14:01:11 +00002109 AMDGPUOperand &Op = static_cast<AMDGPUOperand &>(*Operands.back());
Sam Kolton945231a2016-06-10 09:57:59 +00002110 Op.setModifiers(Mods);
2111 }
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002112
Sam Kolton945231a2016-06-10 09:57:59 +00002113 return MatchOperand_Success;
2114}
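
// For example, the sext() modifier parsed above is used by SDWA operands
// (schematically; the exact operand list depends on the opcode):
//   v_add_u32_sdwa v1, vcc, sext(v2), v3 src0_sel:BYTE_0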
Sam Kolton1bdcef72016-05-23 09:59:02 +00002115
Sam Kolton9772eb32017-01-11 11:46:30 +00002116OperandMatchResultTy
2117AMDGPUAsmParser::parseRegWithFPInputMods(OperandVector &Operands) {
2118 return parseRegOrImmWithFPInputMods(Operands, false);
2119}
2120
2121OperandMatchResultTy
2122AMDGPUAsmParser::parseRegWithIntInputMods(OperandVector &Operands) {
2123 return parseRegOrImmWithIntInputMods(Operands, false);
2124}
2125
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00002126OperandMatchResultTy AMDGPUAsmParser::parseVReg32OrOff(OperandVector &Operands) {
2127 std::unique_ptr<AMDGPUOperand> Reg = parseRegister();
2128 if (Reg) {
2129 Operands.push_back(std::move(Reg));
2130 return MatchOperand_Success;
2131 }
2132
2133 const AsmToken &Tok = Parser.getTok();
2134 if (Tok.getString() == "off") {
2135 Operands.push_back(AMDGPUOperand::CreateImm(this, 0, Tok.getLoc(),
2136 AMDGPUOperand::ImmTyOff, false));
2137 Parser.Lex();
2138 return MatchOperand_Success;
2139 }
2140
2141 return MatchOperand_NoMatch;
2142}
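
// For example, export operands may name either a 32-bit VGPR or "off":
//   exp mrt0 v0, v1, off, off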
2143
Tom Stellard45bb48e2015-06-13 03:28:10 +00002144unsigned AMDGPUAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00002145 uint64_t TSFlags = MII.get(Inst.getOpcode()).TSFlags;
2146
2147 if ((getForcedEncodingSize() == 32 && (TSFlags & SIInstrFlags::VOP3)) ||
Sam Kolton05ef1c92016-06-03 10:27:37 +00002148 (getForcedEncodingSize() == 64 && !(TSFlags & SIInstrFlags::VOP3)) ||
2149 (isForcedDPP() && !(TSFlags & SIInstrFlags::DPP)) ||
2150 (isForcedSDWA() && !(TSFlags & SIInstrFlags::SDWA)) )
Tom Stellard45bb48e2015-06-13 03:28:10 +00002151 return Match_InvalidOperand;
2152
Tom Stellard88e0b252015-10-06 15:57:53 +00002153 if ((TSFlags & SIInstrFlags::VOP3) &&
2154 (TSFlags & SIInstrFlags::VOPAsmPrefer32Bit) &&
2155 getForcedEncodingSize() != 64)
2156 return Match_PreferE32;
2157
Sam Koltona568e3d2016-12-22 12:57:41 +00002158 if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
2159 Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
Sam Koltona3ec5c12016-10-07 14:46:06 +00002160 // v_mac_f32/16 allow only dst_sel == DWORD;
Konstantin Zhuravlyovf86e4b72016-11-13 07:01:11 +00002161 auto OpNum =
2162 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::dst_sel);
Sam Koltona3ec5c12016-10-07 14:46:06 +00002163 const auto &Op = Inst.getOperand(OpNum);
2164 if (!Op.isImm() || Op.getImm() != AMDGPU::SDWA::SdwaSel::DWORD) {
2165 return Match_InvalidOperand;
2166 }
2167 }
2168
Matt Arsenaultfd023142017-06-12 15:55:58 +00002169 if ((TSFlags & SIInstrFlags::FLAT) && !hasFlatOffsets()) {
2170 // FIXME: Produces error without correct column reported.
2171 auto OpNum =
2172 AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::offset);
2173 const auto &Op = Inst.getOperand(OpNum);
2174 if (Op.getImm() != 0)
2175 return Match_InvalidOperand;
2176 }
2177
Tom Stellard45bb48e2015-06-13 03:28:10 +00002178 return Match_Success;
2179}
2180
Matt Arsenault5f45e782017-01-09 18:44:11 +00002181// What asm variants we should check
2182ArrayRef<unsigned> AMDGPUAsmParser::getMatchedVariants() const {
2183 if (getForcedEncodingSize() == 32) {
2184 static const unsigned Variants[] = {AMDGPUAsmVariants::DEFAULT};
2185 return makeArrayRef(Variants);
2186 }
2187
2188 if (isForcedVOP3()) {
2189 static const unsigned Variants[] = {AMDGPUAsmVariants::VOP3};
2190 return makeArrayRef(Variants);
2191 }
2192
2193 if (isForcedSDWA()) {
Sam Koltonf7659d712017-05-23 10:08:55 +00002194 static const unsigned Variants[] = {AMDGPUAsmVariants::SDWA,
2195 AMDGPUAsmVariants::SDWA9};
Matt Arsenault5f45e782017-01-09 18:44:11 +00002196 return makeArrayRef(Variants);
2197 }
2198
2199 if (isForcedDPP()) {
2200 static const unsigned Variants[] = {AMDGPUAsmVariants::DPP};
2201 return makeArrayRef(Variants);
2202 }
2203
2204 static const unsigned Variants[] = {
2205 AMDGPUAsmVariants::DEFAULT, AMDGPUAsmVariants::VOP3,
Sam Koltonf7659d712017-05-23 10:08:55 +00002206 AMDGPUAsmVariants::SDWA, AMDGPUAsmVariants::SDWA9, AMDGPUAsmVariants::DPP
Matt Arsenault5f45e782017-01-09 18:44:11 +00002207 };
2208
2209 return makeArrayRef(Variants);
2210}
2211
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002212unsigned AMDGPUAsmParser::findImplicitSGPRReadInVOP(const MCInst &Inst) const {
2213 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2214 const unsigned Num = Desc.getNumImplicitUses();
2215 for (unsigned i = 0; i < Num; ++i) {
2216 unsigned Reg = Desc.ImplicitUses[i];
2217 switch (Reg) {
2218 case AMDGPU::FLAT_SCR:
2219 case AMDGPU::VCC:
2220 case AMDGPU::M0:
2221 return Reg;
2222 default:
2223 break;
2224 }
2225 }
2226 return AMDGPU::NoRegister;
2227}
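
// For example, v_addc_u32_e32 implicitly reads VCC (the carry-in); this
// implicit SGPR read counts toward the constant bus limit checked below.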
2228
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002229// NB: This code is correct only when used to check constant
2230// bus limitations because GFX7 supports no f16 inline constants.
2231// Note that there are no cases when a GFX7 opcode violates
2232// constant bus limitations due to the use of an f16 constant.
2233bool AMDGPUAsmParser::isInlineConstant(const MCInst &Inst,
2234 unsigned OpIdx) const {
2235 const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
2236
2237 if (!AMDGPU::isSISrcOperand(Desc, OpIdx)) {
2238 return false;
2239 }
2240
2241 const MCOperand &MO = Inst.getOperand(OpIdx);
2242
2243 int64_t Val = MO.getImm();
2244 auto OpSize = AMDGPU::getOperandSize(Desc, OpIdx);
2245
2246 switch (OpSize) { // expected operand size
2247 case 8:
2248 return AMDGPU::isInlinableLiteral64(Val, hasInv2PiInlineImm());
2249 case 4:
2250 return AMDGPU::isInlinableLiteral32(Val, hasInv2PiInlineImm());
2251 case 2: {
2252 const unsigned OperandType = Desc.OpInfo[OpIdx].OperandType;
2253 if (OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2INT16 ||
2254 OperandType == AMDGPU::OPERAND_REG_INLINE_C_V2FP16) {
2255 return AMDGPU::isInlinableLiteralV216(Val, hasInv2PiInlineImm());
2256 } else {
2257 return AMDGPU::isInlinableLiteral16(Val, hasInv2PiInlineImm());
2258 }
2259 }
2260 default:
2261 llvm_unreachable("invalid operand size");
2262 }
2263}
2264
2265bool AMDGPUAsmParser::usesConstantBus(const MCInst &Inst, unsigned OpIdx) {
2266 const MCOperand &MO = Inst.getOperand(OpIdx);
2267 if (MO.isImm()) {
2268 return !isInlineConstant(Inst, OpIdx);
2269 }
Sam Koltonf7659d712017-05-23 10:08:55 +00002270 return !MO.isReg() ||
2271 isSGPR(mc2PseudoReg(MO.getReg()), getContext().getRegisterInfo());
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002272}
2273
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002274bool AMDGPUAsmParser::validateConstantBusLimitations(const MCInst &Inst) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002275 const unsigned Opcode = Inst.getOpcode();
2276 const MCInstrDesc &Desc = MII.get(Opcode);
2277 unsigned ConstantBusUseCount = 0;
2278
2279 if (Desc.TSFlags &
2280 (SIInstrFlags::VOPC |
2281 SIInstrFlags::VOP1 | SIInstrFlags::VOP2 |
Sam Koltonf7659d712017-05-23 10:08:55 +00002282 SIInstrFlags::VOP3 | SIInstrFlags::VOP3P |
2283 SIInstrFlags::SDWA)) {
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002284 // Check special imm operands (used by madmk, etc)
2285 if (AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::imm) != -1) {
2286 ++ConstantBusUseCount;
2287 }
2288
2289 unsigned SGPRUsed = findImplicitSGPRReadInVOP(Inst);
2290 if (SGPRUsed != AMDGPU::NoRegister) {
2291 ++ConstantBusUseCount;
2292 }
2293
2294 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2295 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2296 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2297
2298 const int OpIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2299
2300 for (int OpIdx : OpIndices) {
2301 if (OpIdx == -1) break;
2302
2303 const MCOperand &MO = Inst.getOperand(OpIdx);
2304 if (usesConstantBus(Inst, OpIdx)) {
2305 if (MO.isReg()) {
2306 const unsigned Reg = mc2PseudoReg(MO.getReg());
2307        // Pairs of registers with a partial intersection, like these:
2308 // s0, s[0:1]
2309 // flat_scratch_lo, flat_scratch
2310 // flat_scratch_lo, flat_scratch_hi
2311 // are theoretically valid but they are disabled anyway.
2312 // Note that this code mimics SIInstrInfo::verifyInstruction
2313 if (Reg != SGPRUsed) {
2314 ++ConstantBusUseCount;
2315 }
2316 SGPRUsed = Reg;
2317 } else { // Expression or a literal
2318 ++ConstantBusUseCount;
2319 }
2320 }
2321 }
2322 }
2323
2324 return ConstantBusUseCount <= 1;
2325}
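
// Examples of the single constant bus slot enforced above:
//   v_add_f32 v0, s0, s1  // invalid: two different SGPRs
//   v_add_f32 v0, s0, s0  // valid: the same SGPR is counted once
//   v_add_f32 v0, 1.5, s0 // invalid: a literal plus an SGPR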
2326
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002327bool AMDGPUAsmParser::validateEarlyClobberLimitations(const MCInst &Inst) {
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002328 const unsigned Opcode = Inst.getOpcode();
2329 const MCInstrDesc &Desc = MII.get(Opcode);
2330
2331 const int DstIdx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::vdst);
2332 if (DstIdx == -1 ||
2333 Desc.getOperandConstraint(DstIdx, MCOI::EARLY_CLOBBER) == -1) {
2334 return true;
2335 }
2336
2337 const MCRegisterInfo *TRI = getContext().getRegisterInfo();
2338
2339 const int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0);
2340 const int Src1Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src1);
2341 const int Src2Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src2);
2342
2343 assert(DstIdx != -1);
2344 const MCOperand &Dst = Inst.getOperand(DstIdx);
2345 assert(Dst.isReg());
2346 const unsigned DstReg = mc2PseudoReg(Dst.getReg());
2347
2348 const int SrcIndices[] = { Src0Idx, Src1Idx, Src2Idx };
2349
2350 for (int SrcIdx : SrcIndices) {
2351 if (SrcIdx == -1) break;
2352 const MCOperand &Src = Inst.getOperand(SrcIdx);
2353 if (Src.isReg()) {
2354 const unsigned SrcReg = mc2PseudoReg(Src.getReg());
2355 if (isRegIntersect(DstReg, SrcReg, TRI)) {
2356 return false;
2357 }
2358 }
2359 }
2360
2361 return true;
2362}
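
// For example, assuming v_qsad_pk_u16_u8 is defined with an early-clobber
// destination, "v_qsad_pk_u16_u8 v[0:1], v[1:2], v3, v[4:5]" is rejected
// above because v[0:1] and v[1:2] overlap.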
2363
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002364bool AMDGPUAsmParser::validateIntClampSupported(const MCInst &Inst) {
2365
2366 const unsigned Opc = Inst.getOpcode();
2367 const MCInstrDesc &Desc = MII.get(Opc);
2368
2369 if ((Desc.TSFlags & SIInstrFlags::IntClamp) != 0 && !hasIntClamp()) {
2370 int ClampIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp);
2371 assert(ClampIdx != -1);
2372 return Inst.getOperand(ClampIdx).getImm() == 0;
2373 }
2374
2375 return true;
2376}
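
// For example, an integer clamp such as "v_add_u16 v0, v1, v2 clamp" is
// accepted only on subtargets that support it (hasIntClamp()); elsewhere
// the clamp bit must be 0.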
2377
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002378bool AMDGPUAsmParser::validateMIMGDataSize(const MCInst &Inst) {
2379
2380 const unsigned Opc = Inst.getOpcode();
2381 const MCInstrDesc &Desc = MII.get(Opc);
2382
2383 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2384 return true;
2385
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002386 int VDataIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdata);
2387 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2388 int TFEIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::tfe);
2389
2390 assert(VDataIdx != -1);
2391 assert(DMaskIdx != -1);
2392 assert(TFEIdx != -1);
2393
2394 unsigned VDataSize = AMDGPU::getRegOperandSize(getMRI(), Desc, VDataIdx);
2395 unsigned TFESize = Inst.getOperand(TFEIdx).getImm()? 1 : 0;
2396 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2397 if (DMask == 0)
2398 DMask = 1;
2399
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002400 unsigned DataSize =
2401 (Desc.TSFlags & SIInstrFlags::Gather4) ? 4 : countPopulation(DMask);
2402 if (hasPackedD16()) {
2403 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2404 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm())
2405 DataSize = (DataSize + 1) / 2;
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002406 }
2407
2408 return (VDataSize / 4) == DataSize + TFESize;
Dmitry Preobrazhensky70682812018-01-26 16:42:51 +00002409}
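
// Worked example for the check above: dmask == 0xb has 3 bits set and
// tfe == 1 adds one more dword, so vdata must supply 4 dwords (e.g.
// v[0:3]); with d16 on a packed-d16 subtarget the 3 components need only
// (3 + 1) / 2 = 2 dwords plus the tfe dword.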
2410
2411bool AMDGPUAsmParser::validateMIMGAtomicDMask(const MCInst &Inst) {
2412
2413 const unsigned Opc = Inst.getOpcode();
2414 const MCInstrDesc &Desc = MII.get(Opc);
2415
2416 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2417 return true;
2418 if (!Desc.mayLoad() || !Desc.mayStore())
2419 return true; // Not atomic
2420
2421 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2422 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2423
2424 // This is an incomplete check because image_atomic_cmpswap
2425 // may only use 0x3 and 0xf while other atomic operations
2426 // may use 0x1 and 0x3. However, these limitations are
2427 // verified when we check that the dmask matches the dst size.
2428 return DMask == 0x1 || DMask == 0x3 || DMask == 0xf;
2429}
2430
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002431bool AMDGPUAsmParser::validateMIMGGatherDMask(const MCInst &Inst) {
2432
2433 const unsigned Opc = Inst.getOpcode();
2434 const MCInstrDesc &Desc = MII.get(Opc);
2435
2436 if ((Desc.TSFlags & SIInstrFlags::Gather4) == 0)
2437 return true;
2438
2439 int DMaskIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::dmask);
2440 unsigned DMask = Inst.getOperand(DMaskIdx).getImm() & 0xf;
2441
2442 // GATHER4 instructions use dmask in a different fashion compared to
2443 // other MIMG instructions. The only useful DMASK values are
2444 // 1=red, 2=green, 4=blue, 8=alpha. (e.g. 1 returns
2445 // (red,red,red,red) etc.) The ISA document doesn't mention
2446 // this.
2447 return DMask == 0x1 || DMask == 0x2 || DMask == 0x4 || DMask == 0x8;
2448}
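
// For example, image_gather4 with dmask:0x2 gathers the green component of
// four texels, while dmask:0x5 sets two bits and fails the check above.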
2449
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002450bool AMDGPUAsmParser::validateMIMGR128(const MCInst &Inst) {
2451
2452 const unsigned Opc = Inst.getOpcode();
2453 const MCInstrDesc &Desc = MII.get(Opc);
2454
2455 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2456 return true;
2457
2458 int Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::r128);
2459 assert(Idx != -1);
2460
2461 bool R128 = (Inst.getOperand(Idx).getImm() != 0);
2462
2463 return !R128 || hasMIMG_R128();
2464}
2465
2466bool AMDGPUAsmParser::validateMIMGD16(const MCInst &Inst) {
2467
2468 const unsigned Opc = Inst.getOpcode();
2469 const MCInstrDesc &Desc = MII.get(Opc);
2470
2471 if ((Desc.TSFlags & SIInstrFlags::MIMG) == 0)
2472 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002473
Nicolai Haehnlef2674312018-06-21 13:36:01 +00002474 int D16Idx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::d16);
2475 if (D16Idx >= 0 && Inst.getOperand(D16Idx).getImm()) {
2476 if (isCI() || isSI())
2477 return false;
2478 }
2479
2480 return true;
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002481}
2482
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002483bool AMDGPUAsmParser::validateInstruction(const MCInst &Inst,
2484 const SMLoc &IDLoc) {
2485 if (!validateConstantBusLimitations(Inst)) {
2486 Error(IDLoc,
2487 "invalid operand (violates constant bus restrictions)");
2488 return false;
2489 }
2490 if (!validateEarlyClobberLimitations(Inst)) {
2491 Error(IDLoc,
2492 "destination must be different than all sources");
2493 return false;
2494 }
Dmitry Preobrazhenskyff64aa52017-08-16 13:51:56 +00002495 if (!validateIntClampSupported(Inst)) {
2496 Error(IDLoc,
2497 "integer clamping is not supported on this GPU");
2498 return false;
2499 }
Dmitry Preobrazhenskye3271ae2018-02-05 12:45:43 +00002500 if (!validateMIMGR128(Inst)) {
2501 Error(IDLoc,
2502 "r128 modifier is not supported on this GPU");
2503 return false;
2504 }
2505 // For MUBUF/MTBUF, d16 is part of the opcode, so there is nothing to validate.
2506 if (!validateMIMGD16(Inst)) {
2507 Error(IDLoc,
2508 "d16 modifier is not supported on this GPU");
2509 return false;
2510 }
Dmitry Preobrazhensky0a1ff462018-02-05 14:18:53 +00002511 if (!validateMIMGDataSize(Inst)) {
2512 Error(IDLoc,
2513 "image data size does not match dmask and tfe");
2514 return false;
2515 }
2516 if (!validateMIMGAtomicDMask(Inst)) {
2517 Error(IDLoc,
2518 "invalid atomic image dmask");
2519 return false;
2520 }
Dmitry Preobrazhenskyda4a7c02018-03-12 15:03:34 +00002521 if (!validateMIMGGatherDMask(Inst)) {
2522 Error(IDLoc,
2523 "invalid image_gather dmask: only one bit must be set");
2524 return false;
2525 }
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002526
2527 return true;
2528}
2529
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00002530static std::string AMDGPUMnemonicSpellCheck(StringRef S, uint64_t FBS,
2531 unsigned VariantID = 0);
2532
Tom Stellard45bb48e2015-06-13 03:28:10 +00002533bool AMDGPUAsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
2534 OperandVector &Operands,
2535 MCStreamer &Out,
2536 uint64_t &ErrorInfo,
2537 bool MatchingInlineAsm) {
2538 MCInst Inst;
Sam Koltond63d8a72016-09-09 09:37:51 +00002539 unsigned Result = Match_Success;
Matt Arsenault5f45e782017-01-09 18:44:11 +00002540 for (auto Variant : getMatchedVariants()) {
Sam Koltond63d8a72016-09-09 09:37:51 +00002541 uint64_t EI;
2542 auto R = MatchInstructionImpl(Operands, Inst, EI, MatchingInlineAsm,
2543 Variant);
2544 // Match statuses are ordered from least to most specific; we keep the most
2545 // specific status seen across all variants:
2546 // Match_MnemonicFail < Match_InvalidOperand < Match_MissingFeature < Match_PreferE32
2547 if ((R == Match_Success) ||
2548 (R == Match_PreferE32) ||
2549 (R == Match_MissingFeature && Result != Match_PreferE32) ||
2550 (R == Match_InvalidOperand && Result != Match_MissingFeature
2551 && Result != Match_PreferE32) ||
2552 (R == Match_MnemonicFail && Result != Match_InvalidOperand
2553 && Result != Match_MissingFeature
2554 && Result != Match_PreferE32)) {
2555 Result = R;
2556 ErrorInfo = EI;
Tom Stellard45bb48e2015-06-13 03:28:10 +00002557 }
Sam Koltond63d8a72016-09-09 09:37:51 +00002558 if (R == Match_Success)
2559 break;
2560 }
2561
2562 switch (Result) {
2563 default: break;
2564 case Match_Success:
Dmitry Preobrazhenskydc4ac822017-06-21 14:41:34 +00002565 if (!validateInstruction(Inst, IDLoc)) {
2566 return true;
Dmitry Preobrazhensky03880f82017-03-03 14:31:06 +00002567 }
Sam Koltond63d8a72016-09-09 09:37:51 +00002568 Inst.setLoc(IDLoc);
2569 Out.EmitInstruction(Inst, getSTI());
2570 return false;
2571
2572 case Match_MissingFeature:
2573 return Error(IDLoc, "instruction not supported on this GPU");
2574
Matt Arsenaultf7f59b52017-12-20 18:52:57 +00002575 case Match_MnemonicFail: {
2576 uint64_t FBS = ComputeAvailableFeatures(getSTI().getFeatureBits());
2577 std::string Suggestion = AMDGPUMnemonicSpellCheck(
2578 ((AMDGPUOperand &)*Operands[0]).getToken(), FBS);
2579 return Error(IDLoc, "invalid instruction" + Suggestion,
2580 ((AMDGPUOperand &)*Operands[0]).getLocRange());
2581 }
Sam Koltond63d8a72016-09-09 09:37:51 +00002582
2583 case Match_InvalidOperand: {
2584 SMLoc ErrorLoc = IDLoc;
2585 if (ErrorInfo != ~0ULL) {
2586 if (ErrorInfo >= Operands.size()) {
2587 return Error(IDLoc, "too few operands for instruction");
2588 }
2589 ErrorLoc = ((AMDGPUOperand &)*Operands[ErrorInfo]).getStartLoc();
2590 if (ErrorLoc == SMLoc())
2591 ErrorLoc = IDLoc;
2592 }
2593 return Error(ErrorLoc, "invalid operand for instruction");
2594 }
2595
2596 case Match_PreferE32:
2597 return Error(IDLoc, "internal error: instruction without _e64 suffix "
2598 "should be encoded as e32");
Tom Stellard45bb48e2015-06-13 03:28:10 +00002599 }
2600 llvm_unreachable("Implement any new match types added!");
2601}
2602
Artem Tamazov25478d82016-12-29 15:41:52 +00002603bool AMDGPUAsmParser::ParseAsAbsoluteExpression(uint32_t &Ret) {
2604 int64_t Tmp = -1;
2605 if (getLexer().isNot(AsmToken::Integer) && getLexer().isNot(AsmToken::Identifier)) {
2606 return true;
2607 }
2608 if (getParser().parseAbsoluteExpression(Tmp)) {
2609 return true;
2610 }
2611 Ret = static_cast<uint32_t>(Tmp);
2612 return false;
2613}
2614
Tom Stellard347ac792015-06-26 21:15:07 +00002615bool AMDGPUAsmParser::ParseDirectiveMajorMinor(uint32_t &Major,
2616 uint32_t &Minor) {
Artem Tamazov25478d82016-12-29 15:41:52 +00002617 if (ParseAsAbsoluteExpression(Major))
Tom Stellard347ac792015-06-26 21:15:07 +00002618 return TokError("invalid major version");
2619
Tom Stellard347ac792015-06-26 21:15:07 +00002620 if (getLexer().isNot(AsmToken::Comma))
2621 return TokError("minor version number required, comma expected");
2622 Lex();
2623
Artem Tamazov25478d82016-12-29 15:41:52 +00002624 if (ParseAsAbsoluteExpression(Minor))
Tom Stellard347ac792015-06-26 21:15:07 +00002625 return TokError("invalid minor version");
2626
Tom Stellard347ac792015-06-26 21:15:07 +00002627 return false;
2628}
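
// A directive built on this helper takes "major, minor" as absolute
// expressions, e.g. (values illustrative):
//   .hsa_code_object_version 2,1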
2629
Scott Linder1e8c2c72018-06-21 19:38:56 +00002630bool AMDGPUAsmParser::ParseDirectiveAMDGCNTarget() {
2631 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
2632 return TokError("directive only supported for amdgcn architecture");
2633
2634 std::string Target;
2635
2636 SMLoc TargetStart = getTok().getLoc();
2637 if (getParser().parseEscapedString(Target))
2638 return true;
2639 SMRange TargetRange = SMRange(TargetStart, getTok().getLoc());
2640
2641 std::string ExpectedTarget;
2642 raw_string_ostream ExpectedTargetOS(ExpectedTarget);
2643 IsaInfo::streamIsaVersion(&getSTI(), ExpectedTargetOS);
2644
2645 if (Target != ExpectedTargetOS.str())
2646 return getParser().Error(TargetRange.Start, "target must match options",
2647 TargetRange);
2648
2649 getTargetStreamer().EmitDirectiveAMDGCNTarget(Target);
2650 return false;
2651}
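
// Example (the exact string depends on the triple and -mcpu; this value is
// an assumption for a gfx900 HSA target with XNACK):
//   .amdgcn_target "amdgcn-amd-amdhsa--gfx900+xnack"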
2652
2653bool AMDGPUAsmParser::OutOfRangeError(SMRange Range) {
2654 return getParser().Error(Range.Start, "value out of range", Range);
2655}
2656
2657bool AMDGPUAsmParser::calculateGPRBlocks(
2658 const FeatureBitset &Features, bool VCCUsed, bool FlatScrUsed,
2659 bool XNACKUsed, unsigned NextFreeVGPR, SMRange VGPRRange,
2660 unsigned NextFreeSGPR, SMRange SGPRRange, unsigned &VGPRBlocks,
2661 unsigned &SGPRBlocks) {
2662 // TODO(scott.linder): These calculations are duplicated from
2663 // AMDGPUAsmPrinter::getSIProgramInfo and could be unified.
2664 IsaInfo::IsaVersion Version = IsaInfo::getIsaVersion(Features);
2665
2666 unsigned NumVGPRs = NextFreeVGPR;
2667 unsigned NumSGPRs = NextFreeSGPR;
2668 unsigned MaxAddressableNumSGPRs = IsaInfo::getAddressableNumSGPRs(Features);
2669
2670 if (Version.Major >= 8 && !Features.test(FeatureSGPRInitBug) &&
2671 NumSGPRs > MaxAddressableNumSGPRs)
2672 return OutOfRangeError(SGPRRange);
2673
2674 NumSGPRs +=
2675 IsaInfo::getNumExtraSGPRs(Features, VCCUsed, FlatScrUsed, XNACKUsed);
2676
2677 if ((Version.Major <= 7 || Features.test(FeatureSGPRInitBug)) &&
2678 NumSGPRs > MaxAddressableNumSGPRs)
2679 return OutOfRangeError(SGPRRange);
2680
2681 if (Features.test(FeatureSGPRInitBug))
2682 NumSGPRs = IsaInfo::FIXED_NUM_SGPRS_FOR_INIT_BUG;
2683
2684 VGPRBlocks = IsaInfo::getNumVGPRBlocks(Features, NumVGPRs);
2685 SGPRBlocks = IsaInfo::getNumSGPRBlocks(Features, NumSGPRs);
2686
2687 return false;
2688}
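
// Worked example (granule sizes are an assumption matching the pre-gfx10
// encodings of 4 VGPRs / 8 SGPRs per block): NextFreeVGPR = 42 encodes as
// ceil(42 / 4) - 1 = 10 VGPR blocks; the SGPR count is first padded with the
// extra VCC/FLAT_SCR/XNACK registers and then encoded the same way.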
2689
2690bool AMDGPUAsmParser::ParseDirectiveAMDHSAKernel() {
2691 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn)
2692 return TokError("directive only supported for amdgcn architecture");
2693
2694 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA)
2695 return TokError("directive only supported for amdhsa OS");
2696
2697 StringRef KernelName;
2698 if (getParser().parseIdentifier(KernelName))
2699 return true;
2700
2701 kernel_descriptor_t KD = getDefaultAmdhsaKernelDescriptor();
2702
2703 StringSet<> Seen;
2704
2705 IsaInfo::IsaVersion IVersion =
2706 IsaInfo::getIsaVersion(getSTI().getFeatureBits());
2707
2708 SMRange VGPRRange;
2709 uint64_t NextFreeVGPR = 0;
2710 SMRange SGPRRange;
2711 uint64_t NextFreeSGPR = 0;
2712 unsigned UserSGPRCount = 0;
2713 bool ReserveVCC = true;
2714 bool ReserveFlatScr = true;
2715 bool ReserveXNACK = hasXNACK();
2716
2717 while (true) {
2718 while (getLexer().is(AsmToken::EndOfStatement))
2719 Lex();
2720
2721 if (getLexer().isNot(AsmToken::Identifier))
2722 return TokError("expected .amdhsa_ directive or .end_amdhsa_kernel");
2723
2724 StringRef ID = getTok().getIdentifier();
2725 SMRange IDRange = getTok().getLocRange();
2726 Lex();
2727
2728 if (ID == ".end_amdhsa_kernel")
2729 break;
2730
2731 if (Seen.find(ID) != Seen.end())
2732 return TokError(".amdhsa_ directives cannot be repeated");
2733 Seen.insert(ID);
2734
2735 SMLoc ValStart = getTok().getLoc();
2736 int64_t IVal;
2737 if (getParser().parseAbsoluteExpression(IVal))
2738 return true;
2739 SMLoc ValEnd = getTok().getLoc();
2740 SMRange ValRange = SMRange(ValStart, ValEnd);
2741
2742 if (IVal < 0)
2743 return OutOfRangeError(ValRange);
2744
2745 uint64_t Val = IVal;
2746
2747#define PARSE_BITS_ENTRY(FIELD, ENTRY, VALUE, RANGE) \
2748 if (!isUInt<ENTRY##_WIDTH>(VALUE)) \
2749 return OutOfRangeError(RANGE); \
2750 AMDHSA_BITS_SET(FIELD, ENTRY, VALUE);
2751
2752 if (ID == ".amdhsa_group_segment_fixed_size") {
2753 if (!isUInt<sizeof(KD.group_segment_fixed_size) * CHAR_BIT>(Val))
2754 return OutOfRangeError(ValRange);
2755 KD.group_segment_fixed_size = Val;
2756 } else if (ID == ".amdhsa_private_segment_fixed_size") {
2757 if (!isUInt<sizeof(KD.private_segment_fixed_size) * CHAR_BIT>(Val))
2758 return OutOfRangeError(ValRange);
2759 KD.private_segment_fixed_size = Val;
2760 } else if (ID == ".amdhsa_user_sgpr_private_segment_buffer") {
2761 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2762 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_BUFFER,
2763 Val, ValRange);
2764 UserSGPRCount++;
2765 } else if (ID == ".amdhsa_user_sgpr_dispatch_ptr") {
2766 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2767 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_PTR, Val,
2768 ValRange);
2769 UserSGPRCount++;
2770 } else if (ID == ".amdhsa_user_sgpr_queue_ptr") {
2771 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2772 KERNEL_CODE_PROPERTY_ENABLE_SGPR_QUEUE_PTR, Val,
2773 ValRange);
2774 UserSGPRCount++;
2775 } else if (ID == ".amdhsa_user_sgpr_kernarg_segment_ptr") {
2776 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2777 KERNEL_CODE_PROPERTY_ENABLE_SGPR_KERNARG_SEGMENT_PTR,
2778 Val, ValRange);
2779 UserSGPRCount++;
2780 } else if (ID == ".amdhsa_user_sgpr_dispatch_id") {
2781 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2782 KERNEL_CODE_PROPERTY_ENABLE_SGPR_DISPATCH_ID, Val,
2783 ValRange);
2784 UserSGPRCount++;
2785 } else if (ID == ".amdhsa_user_sgpr_flat_scratch_init") {
2786 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2787 KERNEL_CODE_PROPERTY_ENABLE_SGPR_FLAT_SCRATCH_INIT, Val,
2788 ValRange);
2789 UserSGPRCount++;
2790 } else if (ID == ".amdhsa_user_sgpr_private_segment_size") {
2791 PARSE_BITS_ENTRY(KD.kernel_code_properties,
2792 KERNEL_CODE_PROPERTY_ENABLE_SGPR_PRIVATE_SEGMENT_SIZE,
2793 Val, ValRange);
2794 UserSGPRCount++;
2795 } else if (ID == ".amdhsa_system_sgpr_private_segment_wavefront_offset") {
2796 PARSE_BITS_ENTRY(
2797 KD.compute_pgm_rsrc2,
2798 COMPUTE_PGM_RSRC2_ENABLE_SGPR_PRIVATE_SEGMENT_WAVEFRONT_OFFSET, Val,
2799 ValRange);
2800 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_x") {
2801 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2802 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_X, Val,
2803 ValRange);
2804 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_y") {
2805 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2806 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Y, Val,
2807 ValRange);
2808 } else if (ID == ".amdhsa_system_sgpr_workgroup_id_z") {
2809 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2810 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_ID_Z, Val,
2811 ValRange);
2812 } else if (ID == ".amdhsa_system_sgpr_workgroup_info") {
2813 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2814 COMPUTE_PGM_RSRC2_ENABLE_SGPR_WORKGROUP_INFO, Val,
2815 ValRange);
2816 } else if (ID == ".amdhsa_system_vgpr_workitem_id") {
2817 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2818 COMPUTE_PGM_RSRC2_ENABLE_VGPR_WORKITEM_ID, Val,
2819 ValRange);
2820 } else if (ID == ".amdhsa_next_free_vgpr") {
2821 VGPRRange = ValRange;
2822 NextFreeVGPR = Val;
2823 } else if (ID == ".amdhsa_next_free_sgpr") {
2824 SGPRRange = ValRange;
2825 NextFreeSGPR = Val;
2826 } else if (ID == ".amdhsa_reserve_vcc") {
2827 if (!isUInt<1>(Val))
2828 return OutOfRangeError(ValRange);
2829 ReserveVCC = Val;
2830 } else if (ID == ".amdhsa_reserve_flat_scratch") {
2831 if (IVersion.Major < 7)
2832 return getParser().Error(IDRange.Start, "directive requires gfx7+",
2833 IDRange);
2834 if (!isUInt<1>(Val))
2835 return OutOfRangeError(ValRange);
2836 ReserveFlatScr = Val;
2837 } else if (ID == ".amdhsa_reserve_xnack_mask") {
2838 if (IVersion.Major < 8)
2839 return getParser().Error(IDRange.Start, "directive requires gfx8+",
2840 IDRange);
2841 if (!isUInt<1>(Val))
2842 return OutOfRangeError(ValRange);
2843 ReserveXNACK = Val;
2844 } else if (ID == ".amdhsa_float_round_mode_32") {
2845 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2846 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_32, Val, ValRange);
2847 } else if (ID == ".amdhsa_float_round_mode_16_64") {
2848 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2849 COMPUTE_PGM_RSRC1_FLOAT_ROUND_MODE_16_64, Val, ValRange);
2850 } else if (ID == ".amdhsa_float_denorm_mode_32") {
2851 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2852 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_32, Val, ValRange);
2853 } else if (ID == ".amdhsa_float_denorm_mode_16_64") {
2854 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2855 COMPUTE_PGM_RSRC1_FLOAT_DENORM_MODE_16_64, Val,
2856 ValRange);
2857 } else if (ID == ".amdhsa_dx10_clamp") {
2858 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1,
2859 COMPUTE_PGM_RSRC1_ENABLE_DX10_CLAMP, Val, ValRange);
2860 } else if (ID == ".amdhsa_ieee_mode") {
2861 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_ENABLE_IEEE_MODE,
2862 Val, ValRange);
2863 } else if (ID == ".amdhsa_fp16_overflow") {
2864 if (IVersion.Major < 9)
2865 return getParser().Error(IDRange.Start, "directive requires gfx9+",
2866 IDRange);
2867 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc1, COMPUTE_PGM_RSRC1_FP16_OVFL, Val,
2868 ValRange);
2869 } else if (ID == ".amdhsa_exception_fp_ieee_invalid_op") {
2870 PARSE_BITS_ENTRY(
2871 KD.compute_pgm_rsrc2,
2872 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INVALID_OPERATION, Val,
2873 ValRange);
2874 } else if (ID == ".amdhsa_exception_fp_denorm_src") {
2875 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2876 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_FP_DENORMAL_SOURCE,
2877 Val, ValRange);
2878 } else if (ID == ".amdhsa_exception_fp_ieee_div_zero") {
2879 PARSE_BITS_ENTRY(
2880 KD.compute_pgm_rsrc2,
2881 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_DIVISION_BY_ZERO, Val,
2882 ValRange);
2883 } else if (ID == ".amdhsa_exception_fp_ieee_overflow") {
2884 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2885 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_OVERFLOW,
2886 Val, ValRange);
2887 } else if (ID == ".amdhsa_exception_fp_ieee_underflow") {
2888 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2889 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_UNDERFLOW,
2890 Val, ValRange);
2891 } else if (ID == ".amdhsa_exception_fp_ieee_inexact") {
2892 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2893 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_IEEE_754_FP_INEXACT,
2894 Val, ValRange);
2895 } else if (ID == ".amdhsa_exception_int_div_zero") {
2896 PARSE_BITS_ENTRY(KD.compute_pgm_rsrc2,
2897 COMPUTE_PGM_RSRC2_ENABLE_EXCEPTION_INT_DIVIDE_BY_ZERO,
2898 Val, ValRange);
2899 } else {
2900 return getParser().Error(IDRange.Start,
2901 "unknown .amdhsa_kernel directive", IDRange);
2902 }
2903
2904#undef PARSE_BITS_ENTRY
2905 }
2906
2907 if (Seen.find(".amdhsa_next_free_vgpr") == Seen.end())
2908 return TokError(".amdhsa_next_free_vgpr directive is required");
2909
2910 if (Seen.find(".amdhsa_next_free_sgpr") == Seen.end())
2911 return TokError(".amdhsa_next_free_sgpr directive is required");
2912
2913 unsigned VGPRBlocks;
2914 unsigned SGPRBlocks;
2915 if (calculateGPRBlocks(getFeatureBits(), ReserveVCC, ReserveFlatScr,
2916 ReserveXNACK, NextFreeVGPR, VGPRRange, NextFreeSGPR,
2917 SGPRRange, VGPRBlocks, SGPRBlocks))
2918 return true;
2919
2920 if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT_WIDTH>(
2921 VGPRBlocks))
2922 return OutOfRangeError(VGPRRange);
2923 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
2924 COMPUTE_PGM_RSRC1_GRANULATED_WORKITEM_VGPR_COUNT, VGPRBlocks);
2925
2926 if (!isUInt<COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT_WIDTH>(
2927 SGPRBlocks))
2928 return OutOfRangeError(SGPRRange);
2929 AMDHSA_BITS_SET(KD.compute_pgm_rsrc1,
2930 COMPUTE_PGM_RSRC1_GRANULATED_WAVEFRONT_SGPR_COUNT,
2931 SGPRBlocks);
2932
2933 if (!isUInt<COMPUTE_PGM_RSRC2_USER_SGPR_COUNT_WIDTH>(UserSGPRCount))
2934 return TokError("too many user SGPRs enabled");
2935 AMDHSA_BITS_SET(KD.compute_pgm_rsrc2, COMPUTE_PGM_RSRC2_USER_SGPR_COUNT,
2936 UserSGPRCount);
2937
2938 getTargetStreamer().EmitAmdhsaKernelDescriptor(
2939 getSTI(), KernelName, KD, NextFreeVGPR, NextFreeSGPR, ReserveVCC,
2940 ReserveFlatScr, ReserveXNACK);
2941 return false;
2942}
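
// A minimal sketch of the block this parses (field values illustrative; only
// the two .amdhsa_next_free_* directives are mandatory):
//   .amdhsa_kernel my_kernel
//     .amdhsa_user_sgpr_kernarg_segment_ptr 1
//     .amdhsa_next_free_vgpr 8
//     .amdhsa_next_free_sgpr 16
//   .end_amdhsa_kernel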
2943
Tom Stellard347ac792015-06-26 21:15:07 +00002944bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectVersion() {
Tom Stellard347ac792015-06-26 21:15:07 +00002945 uint32_t Major;
2946 uint32_t Minor;
2947
2948 if (ParseDirectiveMajorMinor(Major, Minor))
2949 return true;
2950
2951 getTargetStreamer().EmitDirectiveHSACodeObjectVersion(Major, Minor);
2952 return false;
2953}
2954
2955bool AMDGPUAsmParser::ParseDirectiveHSACodeObjectISA() {
Tom Stellard347ac792015-06-26 21:15:07 +00002956 uint32_t Major;
2957 uint32_t Minor;
2958 uint32_t Stepping;
2959 StringRef VendorName;
2960 StringRef ArchName;
2961
2962 // If this directive has no arguments, then use the ISA version for the
2963 // targeted GPU.
2964 if (getLexer().is(AsmToken::EndOfStatement)) {
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00002965 AMDGPU::IsaInfo::IsaVersion ISA =
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00002966 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00002967 getTargetStreamer().EmitDirectiveHSACodeObjectISA(ISA.Major, ISA.Minor,
2968 ISA.Stepping,
Tom Stellard347ac792015-06-26 21:15:07 +00002969 "AMD", "AMDGPU");
2970 return false;
2971 }
2972
Tom Stellard347ac792015-06-26 21:15:07 +00002973 if (ParseDirectiveMajorMinor(Major, Minor))
2974 return true;
2975
2976 if (getLexer().isNot(AsmToken::Comma))
2977 return TokError("stepping version number required, comma expected");
2978 Lex();
2979
Artem Tamazov25478d82016-12-29 15:41:52 +00002980 if (ParseAsAbsoluteExpression(Stepping))
Tom Stellard347ac792015-06-26 21:15:07 +00002981 return TokError("invalid stepping version");
2982
Tom Stellard347ac792015-06-26 21:15:07 +00002983 if (getLexer().isNot(AsmToken::Comma))
2984 return TokError("vendor name required, comma expected");
2985 Lex();
2986
2987 if (getLexer().isNot(AsmToken::String))
2988 return TokError("invalid vendor name");
2989
2990 VendorName = getLexer().getTok().getStringContents();
2991 Lex();
2992
2993 if (getLexer().isNot(AsmToken::Comma))
2994 return TokError("arch name required, comma expected");
2995 Lex();
2996
2997 if (getLexer().isNot(AsmToken::String))
2998 return TokError("invalid arch name");
2999
3000 ArchName = getLexer().getTok().getStringContents();
3001 Lex();
3002
3003 getTargetStreamer().EmitDirectiveHSACodeObjectISA(Major, Minor, Stepping,
3004 VendorName, ArchName);
3005 return false;
3006}
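
// Example forms accepted above (values illustrative); the argument-less form
// emits the ISA version of the targeted GPU:
//   .hsa_code_object_isa
//   .hsa_code_object_isa 8,0,3,"AMD","AMDGPU"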
3007
Tom Stellardff7416b2015-06-26 21:58:31 +00003008bool AMDGPUAsmParser::ParseAMDKernelCodeTValue(StringRef ID,
3009 amd_kernel_code_t &Header) {
Konstantin Zhuravlyov61830652018-04-09 20:47:22 +00003010 // max_scratch_backing_memory_byte_size is deprecated. Ignore it while parsing
3011 // assembly for backwards compatibility.
3012 if (ID == "max_scratch_backing_memory_byte_size") {
3013 Parser.eatToEndOfStatement();
3014 return false;
3015 }
3016
Valery Pykhtindc110542016-03-06 20:25:36 +00003017 SmallString<40> ErrStr;
3018 raw_svector_ostream Err(ErrStr);
Valery Pykhtina852d692016-06-23 14:13:06 +00003019 if (!parseAmdKernelCodeField(ID, getParser(), Header, Err)) {
Valery Pykhtindc110542016-03-06 20:25:36 +00003020 return TokError(Err.str());
3021 }
Tom Stellardff7416b2015-06-26 21:58:31 +00003022 Lex();
Tom Stellardff7416b2015-06-26 21:58:31 +00003023 return false;
3024}
3025
3026bool AMDGPUAsmParser::ParseDirectiveAMDKernelCodeT() {
Tom Stellardff7416b2015-06-26 21:58:31 +00003027 amd_kernel_code_t Header;
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00003028 AMDGPU::initDefaultAMDKernelCodeT(Header, getFeatureBits());
Tom Stellardff7416b2015-06-26 21:58:31 +00003029
3030 while (true) {
Tom Stellardff7416b2015-06-26 21:58:31 +00003031 // Lex EndOfStatement. This is in a while loop, because lexing a comment
3032 // will set the current token to EndOfStatement.
3033 while (getLexer().is(AsmToken::EndOfStatement))
3034 Lex();
3035
3036 if (getLexer().isNot(AsmToken::Identifier))
3037 return TokError("expected value identifier or .end_amd_kernel_code_t");
3038
3039 StringRef ID = getLexer().getTok().getIdentifier();
3040 Lex();
3041
3042 if (ID == ".end_amd_kernel_code_t")
3043 break;
3044
3045 if (ParseAMDKernelCodeTValue(ID, Header))
3046 return true;
3047 }
3048
3049 getTargetStreamer().EmitAMDKernelCodeT(Header);
3050
3051 return false;
3052}
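
// Sketch of the block this parses; any amd_kernel_code_t field understood by
// parseAmdKernelCodeField may appear (the one below is just an example):
//   .amd_kernel_code_t
//     enable_sgpr_kernarg_segment_ptr = 1
//   .end_amd_kernel_code_t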
3053
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003054bool AMDGPUAsmParser::ParseDirectiveAMDGPUHsaKernel() {
3055 if (getLexer().isNot(AsmToken::Identifier))
3056 return TokError("expected symbol name");
3057
3058 StringRef KernelName = Parser.getTok().getString();
3059
3060 getTargetStreamer().EmitAMDGPUSymbolType(KernelName,
3061 ELF::STT_AMDGPU_HSA_KERNEL);
3062 Lex();
Scott Linder1e8c2c72018-06-21 19:38:56 +00003063 if (!AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI()))
3064 KernelScope.initialize(getContext());
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003065 return false;
3066}
3067
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003068bool AMDGPUAsmParser::ParseDirectiveISAVersion() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003069 if (getSTI().getTargetTriple().getArch() != Triple::amdgcn) {
3070 return Error(getParser().getTok().getLoc(),
3071 ".amd_amdgpu_isa directive is not available on non-amdgcn "
3072 "architectures");
3073 }
3074
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003075 auto ISAVersionStringFromASM = getLexer().getTok().getStringContents();
3076
3077 std::string ISAVersionStringFromSTI;
3078 raw_string_ostream ISAVersionStreamFromSTI(ISAVersionStringFromSTI);
3079 IsaInfo::streamIsaVersion(&getSTI(), ISAVersionStreamFromSTI);
3080
3081 if (ISAVersionStringFromASM != ISAVersionStreamFromSTI.str()) {
3082 return Error(getParser().getTok().getLoc(),
3083 ".amd_amdgpu_isa directive does not match triple and/or mcpu "
3084 "arguments specified through the command line");
3085 }
3086
3087 getTargetStreamer().EmitISAVersion(ISAVersionStreamFromSTI.str());
3088 Lex();
3089
3090 return false;
3091}
3092
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003093bool AMDGPUAsmParser::ParseDirectiveHSAMetadata() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003094 if (getSTI().getTargetTriple().getOS() != Triple::AMDHSA) {
3095 return Error(getParser().getTok().getLoc(),
3096 (Twine(HSAMD::AssemblerDirectiveBegin) + Twine(" directive is "
3097 "not available on non-amdhsa OSes")).str());
3098 }
3099
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003100 std::string HSAMetadataString;
3101 raw_string_ostream YamlStream(HSAMetadataString);
3102
3103 getLexer().setSkipSpace(false);
3104
3105 bool FoundEnd = false;
3106 while (!getLexer().is(AsmToken::Eof)) {
3107 while (getLexer().is(AsmToken::Space)) {
3108 YamlStream << getLexer().getTok().getString();
3109 Lex();
3110 }
3111
3112 if (getLexer().is(AsmToken::Identifier)) {
3113 StringRef ID = getLexer().getTok().getIdentifier();
3114 if (ID == AMDGPU::HSAMD::AssemblerDirectiveEnd) {
3115 Lex();
3116 FoundEnd = true;
3117 break;
3118 }
3119 }
3120
3121 YamlStream << Parser.parseStringToEndOfStatement()
3122 << getContext().getAsmInfo()->getSeparatorString();
3123
3124 Parser.eatToEndOfStatement();
3125 }
3126
3127 getLexer().setSkipSpace(true);
3128
3129 if (getLexer().is(AsmToken::Eof) && !FoundEnd) {
3130 return TokError(Twine("expected directive ") +
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003131 Twine(HSAMD::AssemblerDirectiveEnd) + Twine(" not found"));
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003132 }
3133
3134 YamlStream.flush();
3135
3136 if (!getTargetStreamer().EmitHSAMetadata(HSAMetadataString))
3137 return Error(getParser().getTok().getLoc(), "invalid HSA metadata");
3138
3139 return false;
3140}
3141
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003142bool AMDGPUAsmParser::ParseDirectivePALMetadata() {
Konstantin Zhuravlyov219066b2017-10-14 16:15:28 +00003143 if (getSTI().getTargetTriple().getOS() != Triple::AMDPAL) {
3144 return Error(getParser().getTok().getLoc(),
3145 (Twine(PALMD::AssemblerDirective) + Twine(" directive is "
3146 "not available on non-amdpal OSes")).str());
3147 }
3148
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003149 PALMD::Metadata PALMetadata;
Tim Renouf72800f02017-10-03 19:03:52 +00003150 for (;;) {
3151 uint32_t Value;
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003152 if (ParseAsAbsoluteExpression(Value)) {
3153 return TokError(Twine("invalid value in ") +
3154 Twine(PALMD::AssemblerDirective));
3155 }
3156 PALMetadata.push_back(Value);
Tim Renouf72800f02017-10-03 19:03:52 +00003157 if (getLexer().isNot(AsmToken::Comma))
3158 break;
3159 Lex();
3160 }
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003161 getTargetStreamer().EmitPALMetadata(PALMetadata);
Tim Renouf72800f02017-10-03 19:03:52 +00003162 return false;
3163}
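
// Example (directive name per PALMD::AssemblerDirective; the register/value
// pairs are purely illustrative):
//   .amd_amdgpu_pal_metadata 0x2c0a, 0x42, 0x2c0b, 0x1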
3164
Tom Stellard45bb48e2015-06-13 03:28:10 +00003165bool AMDGPUAsmParser::ParseDirective(AsmToken DirectiveID) {
Tom Stellard347ac792015-06-26 21:15:07 +00003166 StringRef IDVal = DirectiveID.getString();
3167
Scott Linder1e8c2c72018-06-21 19:38:56 +00003168 if (AMDGPU::IsaInfo::hasCodeObjectV3(&getSTI())) {
3169 if (IDVal == ".amdgcn_target")
3170 return ParseDirectiveAMDGCNTarget();
Tom Stellard347ac792015-06-26 21:15:07 +00003171
Scott Linder1e8c2c72018-06-21 19:38:56 +00003172 if (IDVal == ".amdhsa_kernel")
3173 return ParseDirectiveAMDHSAKernel();
3174 } else {
3175 if (IDVal == ".hsa_code_object_version")
3176 return ParseDirectiveHSACodeObjectVersion();
Tom Stellard347ac792015-06-26 21:15:07 +00003177
Scott Linder1e8c2c72018-06-21 19:38:56 +00003178 if (IDVal == ".hsa_code_object_isa")
3179 return ParseDirectiveHSACodeObjectISA();
Tom Stellardff7416b2015-06-26 21:58:31 +00003180
Scott Linder1e8c2c72018-06-21 19:38:56 +00003181 if (IDVal == ".amd_kernel_code_t")
3182 return ParseDirectiveAMDKernelCodeT();
Tom Stellard1e1b05d2015-11-06 11:45:14 +00003183
Scott Linder1e8c2c72018-06-21 19:38:56 +00003184 if (IDVal == ".amdgpu_hsa_kernel")
3185 return ParseDirectiveAMDGPUHsaKernel();
3186
3187 if (IDVal == ".amd_amdgpu_isa")
3188 return ParseDirectiveISAVersion();
3189 }
Konstantin Zhuravlyov9c05b2b2017-10-14 15:40:33 +00003190
Konstantin Zhuravlyov516651b2017-10-11 22:59:35 +00003191 if (IDVal == AMDGPU::HSAMD::AssemblerDirectiveBegin)
3192 return ParseDirectiveHSAMetadata();
3193
Konstantin Zhuravlyovc3beb6a2017-10-11 22:41:09 +00003194 if (IDVal == PALMD::AssemblerDirective)
3195 return ParseDirectivePALMetadata();
Tim Renouf72800f02017-10-03 19:03:52 +00003196
Tom Stellard45bb48e2015-06-13 03:28:10 +00003197 return true;
3198}
3199
Matt Arsenault68802d32015-11-05 03:11:27 +00003200bool AMDGPUAsmParser::subtargetHasRegister(const MCRegisterInfo &MRI,
3201 unsigned RegNo) const {
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003202
3203 for (MCRegAliasIterator R(AMDGPU::TTMP12_TTMP13_TTMP14_TTMP15, &MRI, true);
3204 R.isValid(); ++R) {
3205 if (*R == RegNo)
3206 return isGFX9();
3207 }
3208
3209 switch (RegNo) {
3210 case AMDGPU::TBA:
3211 case AMDGPU::TBA_LO:
3212 case AMDGPU::TBA_HI:
3213 case AMDGPU::TMA:
3214 case AMDGPU::TMA_LO:
3215 case AMDGPU::TMA_HI:
3216 return !isGFX9();
Dmitry Preobrazhensky3afbd822018-01-10 14:22:19 +00003217 case AMDGPU::XNACK_MASK:
3218 case AMDGPU::XNACK_MASK_LO:
3219 case AMDGPU::XNACK_MASK_HI:
3220 return !isCI() && !isSI() && hasXNACK();
Dmitry Preobrazhenskyac2b0262017-12-11 15:23:20 +00003221 default:
3222 break;
3223 }
3224
Matt Arsenault3b159672015-12-01 20:31:08 +00003225 if (isCI())
Matt Arsenault68802d32015-11-05 03:11:27 +00003226 return true;
3227
Matt Arsenault3b159672015-12-01 20:31:08 +00003228 if (isSI()) {
3229 // No flat_scr
3230 switch (RegNo) {
3231 case AMDGPU::FLAT_SCR:
3232 case AMDGPU::FLAT_SCR_LO:
3233 case AMDGPU::FLAT_SCR_HI:
3234 return false;
3235 default:
3236 return true;
3237 }
3238 }
3239
Matt Arsenault68802d32015-11-05 03:11:27 +00003240 // VI only has 102 SGPRs, so make sure we aren't trying to use the 2 more that
3241 // SI/CI have.
3242 for (MCRegAliasIterator R(AMDGPU::SGPR102_SGPR103, &MRI, true);
3243 R.isValid(); ++R) {
3244 if (*R == RegNo)
3245 return false;
3246 }
3247
3248 return true;
3249}
3250
Alex Bradbury58eba092016-11-01 16:32:05 +00003251OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003252AMDGPUAsmParser::parseOperand(OperandVector &Operands, StringRef Mnemonic) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003253 // Try to parse with a custom parser
3254 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3255
3256 // If we successfully parsed the operand or if there was an error parsing,
3257 // we are done.
3258 //
3259 // If we are parsing after we reach EndOfStatement, then this means we
3260 // are appending default values to the Operands list. This is only done
3261 // by a custom parser, so we shouldn't continue on to the generic parsing.
Sam Kolton1bdcef72016-05-23 09:59:02 +00003262 if (ResTy == MatchOperand_Success || ResTy == MatchOperand_ParseFail ||
Tom Stellard45bb48e2015-06-13 03:28:10 +00003263 getLexer().is(AsmToken::EndOfStatement))
3264 return ResTy;
3265
Sam Kolton1bdcef72016-05-23 09:59:02 +00003266 ResTy = parseRegOrImm(Operands);
Nikolay Haustov9b7577e2016-03-09 11:03:21 +00003267
Sam Kolton1bdcef72016-05-23 09:59:02 +00003268 if (ResTy == MatchOperand_Success)
3269 return ResTy;
3270
Dmitry Preobrazhensky4b11a782017-08-04 13:55:24 +00003271 const auto &Tok = Parser.getTok();
3272 SMLoc S = Tok.getLoc();
Tom Stellard89049702016-06-15 02:54:14 +00003273
Dmitry Preobrazhensky4b11a782017-08-04 13:55:24 +00003274 const MCExpr *Expr = nullptr;
3275 if (!Parser.parseExpression(Expr)) {
3276 Operands.push_back(AMDGPUOperand::CreateExpr(this, Expr, S));
3277 return MatchOperand_Success;
3278 }
3279
3280 // Possibly this is an instruction flag like 'gds'.
3281 if (Tok.getKind() == AsmToken::Identifier) {
3282 Operands.push_back(AMDGPUOperand::CreateToken(this, Tok.getString(), S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003283 Parser.Lex();
Sam Kolton1bdcef72016-05-23 09:59:02 +00003284 return MatchOperand_Success;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003285 }
Dmitry Preobrazhensky4b11a782017-08-04 13:55:24 +00003286
Sam Kolton1bdcef72016-05-23 09:59:02 +00003287 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003288}
3289
Sam Kolton05ef1c92016-06-03 10:27:37 +00003290StringRef AMDGPUAsmParser::parseMnemonicSuffix(StringRef Name) {
3291 // Clear any forced encodings from the previous instruction.
3292 setForcedEncodingSize(0);
3293 setForcedDPP(false);
3294 setForcedSDWA(false);
3295
3296 if (Name.endswith("_e64")) {
3297 setForcedEncodingSize(64);
3298 return Name.substr(0, Name.size() - 4);
3299 } else if (Name.endswith("_e32")) {
3300 setForcedEncodingSize(32);
3301 return Name.substr(0, Name.size() - 4);
3302 } else if (Name.endswith("_dpp")) {
3303 setForcedDPP(true);
3304 return Name.substr(0, Name.size() - 4);
3305 } else if (Name.endswith("_sdwa")) {
3306 setForcedSDWA(true);
3307 return Name.substr(0, Name.size() - 5);
3308 }
3309 return Name;
3310}
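
// For example, "v_mov_b32_e64" forces the 64-bit (VOP3) encoding and is
// matched as "v_mov_b32"; "_e32", "_dpp" and "_sdwa" force their respective
// encodings in the same way before the suffix is stripped.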
3311
Tom Stellard45bb48e2015-06-13 03:28:10 +00003312bool AMDGPUAsmParser::ParseInstruction(ParseInstructionInfo &Info,
3313 StringRef Name,
3314 SMLoc NameLoc, OperandVector &Operands) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003315 // Add the instruction mnemonic
Sam Kolton05ef1c92016-06-03 10:27:37 +00003316 Name = parseMnemonicSuffix(Name);
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003317 Operands.push_back(AMDGPUOperand::CreateToken(this, Name, NameLoc));
Matt Arsenault37fefd62016-06-10 02:18:02 +00003318
Tom Stellard45bb48e2015-06-13 03:28:10 +00003319 while (!getLexer().is(AsmToken::EndOfStatement)) {
Alex Bradbury58eba092016-11-01 16:32:05 +00003320 OperandMatchResultTy Res = parseOperand(Operands, Name);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003321
3322 // Eat the comma or space if there is one.
3323 if (getLexer().is(AsmToken::Comma))
3324 Parser.Lex();
Matt Arsenault37fefd62016-06-10 02:18:02 +00003325
Tom Stellard45bb48e2015-06-13 03:28:10 +00003326 switch (Res) {
3327 case MatchOperand_Success: break;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003328 case MatchOperand_ParseFail:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003329 Error(getLexer().getLoc(), "failed parsing operand.");
3330 while (!getLexer().is(AsmToken::EndOfStatement)) {
3331 Parser.Lex();
3332 }
3333 return true;
Matt Arsenault37fefd62016-06-10 02:18:02 +00003334 case MatchOperand_NoMatch:
Sam Kolton1bdcef72016-05-23 09:59:02 +00003335 Error(getLexer().getLoc(), "not a valid operand.");
3336 while (!getLexer().is(AsmToken::EndOfStatement)) {
3337 Parser.Lex();
3338 }
3339 return true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003340 }
3341 }
3342
Tom Stellard45bb48e2015-06-13 03:28:10 +00003343 return false;
3344}
3345
3346//===----------------------------------------------------------------------===//
3347// Utility functions
3348//===----------------------------------------------------------------------===//
3349
Alex Bradbury58eba092016-11-01 16:32:05 +00003350OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00003351AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, int64_t &Int) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003352 switch(getLexer().getKind()) {
3353 default: return MatchOperand_NoMatch;
3354 case AsmToken::Identifier: {
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003355 StringRef Name = Parser.getTok().getString();
3356 if (!Name.equals(Prefix)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003357 return MatchOperand_NoMatch;
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003358 }
Tom Stellard45bb48e2015-06-13 03:28:10 +00003359
3360 Parser.Lex();
3361 if (getLexer().isNot(AsmToken::Colon))
3362 return MatchOperand_ParseFail;
3363
3364 Parser.Lex();
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003365
3366 bool IsMinus = false;
3367 if (getLexer().getKind() == AsmToken::Minus) {
3368 Parser.Lex();
3369 IsMinus = true;
3370 }
3371
Tom Stellard45bb48e2015-06-13 03:28:10 +00003372 if (getLexer().isNot(AsmToken::Integer))
3373 return MatchOperand_ParseFail;
3374
3375 if (getParser().parseAbsoluteExpression(Int))
3376 return MatchOperand_ParseFail;
Matt Arsenault9698f1c2017-06-20 19:54:14 +00003377
3378 if (IsMinus)
3379 Int = -Int;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003380 break;
3381 }
3382 }
3383 return MatchOperand_Success;
3384}
3385
Alex Bradbury58eba092016-11-01 16:32:05 +00003386OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003387AMDGPUAsmParser::parseIntWithPrefix(const char *Prefix, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003388 AMDGPUOperand::ImmTy ImmTy,
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003389 bool (*ConvertResult)(int64_t&)) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003390 SMLoc S = Parser.getTok().getLoc();
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003391 int64_t Value = 0;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003392
Alex Bradbury58eba092016-11-01 16:32:05 +00003393 OperandMatchResultTy Res = parseIntWithPrefix(Prefix, Value);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003394 if (Res != MatchOperand_Success)
3395 return Res;
3396
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003397 if (ConvertResult && !ConvertResult(Value)) {
3398 return MatchOperand_ParseFail;
3399 }
3400
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003401 Operands.push_back(AMDGPUOperand::CreateImm(this, Value, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003402 return MatchOperand_Success;
3403}
3404
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003405OperandMatchResultTy AMDGPUAsmParser::parseOperandArrayWithPrefix(
3406 const char *Prefix,
3407 OperandVector &Operands,
3408 AMDGPUOperand::ImmTy ImmTy,
3409 bool (*ConvertResult)(int64_t&)) {
3410 StringRef Name = Parser.getTok().getString();
3411 if (!Name.equals(Prefix))
3412 return MatchOperand_NoMatch;
3413
3414 Parser.Lex();
3415 if (getLexer().isNot(AsmToken::Colon))
3416 return MatchOperand_ParseFail;
3417
3418 Parser.Lex();
3419 if (getLexer().isNot(AsmToken::LBrac))
3420 return MatchOperand_ParseFail;
3421 Parser.Lex();
3422
3423 unsigned Val = 0;
3424 SMLoc S = Parser.getTok().getLoc();
3425
3426 // FIXME: How to verify the number of elements matches the number of src
3427 // operands?
Dmitry Preobrazhenskyabf28392017-07-21 13:54:11 +00003428 for (int I = 0; I < 4; ++I) {
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003429 if (I != 0) {
3430 if (getLexer().is(AsmToken::RBrac))
3431 break;
3432
3433 if (getLexer().isNot(AsmToken::Comma))
3434 return MatchOperand_ParseFail;
3435 Parser.Lex();
3436 }
3437
3438 if (getLexer().isNot(AsmToken::Integer))
3439 return MatchOperand_ParseFail;
3440
3441 int64_t Op;
3442 if (getParser().parseAbsoluteExpression(Op))
3443 return MatchOperand_ParseFail;
3444
3445 if (Op != 0 && Op != 1)
3446 return MatchOperand_ParseFail;
3447 Val |= (Op << I);
3448 }
3449
3450 Parser.Lex();
3451 Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S, ImmTy));
3452 return MatchOperand_Success;
3453}
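
// Illustrative example (in the style of the VOP3P modifiers): a prefix like
//   op_sel:[0,1,0]
// parses up to four 0/1 elements into a bitmask; here Val becomes 0b010.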
3454
Alex Bradbury58eba092016-11-01 16:32:05 +00003455OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003456AMDGPUAsmParser::parseNamedBit(const char *Name, OperandVector &Operands,
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003457 AMDGPUOperand::ImmTy ImmTy) {
Tom Stellard45bb48e2015-06-13 03:28:10 +00003458 int64_t Bit = 0;
3459 SMLoc S = Parser.getTok().getLoc();
3460
3461 // If we are already at the end of the statement, this is a default
3462 // argument, so keep the default value.
3463 if (getLexer().isNot(AsmToken::EndOfStatement)) {
3464 switch (getLexer().getKind()) {
3465 case AsmToken::Identifier: {
3466 StringRef Tok = Parser.getTok().getString();
3467 if (Tok == Name) {
3468 Bit = 1;
3469 Parser.Lex();
3470 } else if (Tok.startswith("no") && Tok.endswith(Name)) {
3471 Bit = 0;
3472 Parser.Lex();
3473 } else {
Sam Kolton11de3702016-05-24 12:38:33 +00003474 return MatchOperand_NoMatch;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003475 }
3476 break;
3477 }
3478 default:
3479 return MatchOperand_NoMatch;
3480 }
3481 }
3482
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003483 Operands.push_back(AMDGPUOperand::CreateImm(this, Bit, S, ImmTy));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003484 return MatchOperand_Success;
3485}
3486
Matt Arsenault9be7b0d2017-02-27 18:49:11 +00003487static void addOptionalImmOperand(
3488 MCInst& Inst, const OperandVector& Operands,
3489 AMDGPUAsmParser::OptionalImmIndexMap& OptionalIdx,
3490 AMDGPUOperand::ImmTy ImmT,
3491 int64_t Default = 0) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003492 auto i = OptionalIdx.find(ImmT);
3493 if (i != OptionalIdx.end()) {
3494 unsigned Idx = i->second;
3495 ((AMDGPUOperand &)*Operands[Idx]).addImmOperands(Inst, 1);
3496 } else {
Sam Koltondfa29f72016-03-09 12:29:31 +00003497 Inst.addOperand(MCOperand::createImm(Default));
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003498 }
3499}
3500
Alex Bradbury58eba092016-11-01 16:32:05 +00003501OperandMatchResultTy
Sam Kolton05ef1c92016-06-03 10:27:37 +00003502AMDGPUAsmParser::parseStringWithPrefix(StringRef Prefix, StringRef &Value) {
Sam Kolton3025e7f2016-04-26 13:33:56 +00003503 if (getLexer().isNot(AsmToken::Identifier)) {
3504 return MatchOperand_NoMatch;
3505 }
3506 StringRef Tok = Parser.getTok().getString();
3507 if (Tok != Prefix) {
3508 return MatchOperand_NoMatch;
3509 }
3510
3511 Parser.Lex();
3512 if (getLexer().isNot(AsmToken::Colon)) {
3513 return MatchOperand_ParseFail;
3514 }
Matt Arsenault37fefd62016-06-10 02:18:02 +00003515
Sam Kolton3025e7f2016-04-26 13:33:56 +00003516 Parser.Lex();
3517 if (getLexer().isNot(AsmToken::Identifier)) {
3518 return MatchOperand_ParseFail;
3519 }
3520
3521 Value = Parser.getTok().getString();
3522 return MatchOperand_Success;
3523}
3524
Tom Stellard45bb48e2015-06-13 03:28:10 +00003525//===----------------------------------------------------------------------===//
3526// ds
3527//===----------------------------------------------------------------------===//
3528
Tom Stellard45bb48e2015-06-13 03:28:10 +00003529void AMDGPUAsmParser::cvtDSOffset01(MCInst &Inst,
3530 const OperandVector &Operands) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003531 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003532
3533 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3534 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3535
3536 // Add the register arguments
3537 if (Op.isReg()) {
3538 Op.addRegOperands(Inst, 1);
3539 continue;
3540 }
3541
3542 // Handle optional arguments
3543 OptionalIdx[Op.getImmTy()] = i;
3544 }
3545
Nikolay Haustov4f672a32016-04-29 09:02:30 +00003546 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset0);
3547 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset1);
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003548 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003549
Tom Stellard45bb48e2015-06-13 03:28:10 +00003550 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
3551}
3552
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003553void AMDGPUAsmParser::cvtDSImpl(MCInst &Inst, const OperandVector &Operands,
3554 bool IsGdsHardcoded) {
3555 OptionalImmIndexMap OptionalIdx;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003556
3557 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3558 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3559
3560 // Add the register arguments
3561 if (Op.isReg()) {
3562 Op.addRegOperands(Inst, 1);
3563 continue;
3564 }
3565
3566 if (Op.isToken() && Op.getToken() == "gds") {
Artem Tamazov43b61562017-02-03 12:47:30 +00003567 IsGdsHardcoded = true;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003568 continue;
3569 }
3570
3571 // Handle optional arguments
3572 OptionalIdx[Op.getImmTy()] = i;
3573 }
3574
Dmitry Preobrazhensky793c5922017-05-31 16:26:47 +00003575 AMDGPUOperand::ImmTy OffsetType =
3576 (Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_si ||
3577 Inst.getOpcode() == AMDGPU::DS_SWIZZLE_B32_vi) ? AMDGPUOperand::ImmTySwizzle :
3578 AMDGPUOperand::ImmTyOffset;
3579
3580 addOptionalImmOperand(Inst, Operands, OptionalIdx, OffsetType);
3581
Artem Tamazov43b61562017-02-03 12:47:30 +00003582 if (!IsGdsHardcoded) {
Nikolay Haustov2e4c7292016-02-25 10:58:54 +00003583 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGDS);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003584 }
3585 Inst.addOperand(MCOperand::createReg(AMDGPU::M0)); // m0
3586}
3587
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003588void AMDGPUAsmParser::cvtExp(MCInst &Inst, const OperandVector &Operands) {
3589 OptionalImmIndexMap OptionalIdx;
3590
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00003591 unsigned OperandIdx[4];
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003592 unsigned EnMask = 0;
3593 int SrcIdx = 0;
3594
3595 for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
3596 AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);
3597
3598 // Add the register arguments
3599 if (Op.isReg()) {
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00003600 assert(SrcIdx < 4);
3601 OperandIdx[SrcIdx] = Inst.size();
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003602 Op.addRegOperands(Inst, 1);
3603 ++SrcIdx;
3604 continue;
3605 }
3606
3607 if (Op.isOff()) {
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00003608 assert(SrcIdx < 4);
3609 OperandIdx[SrcIdx] = Inst.size();
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003610 Inst.addOperand(MCOperand::createReg(AMDGPU::NoRegister));
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00003611 ++SrcIdx;
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003612 continue;
3613 }
3614
3615 if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyExpTgt) {
3616 Op.addImmOperands(Inst, 1);
3617 continue;
3618 }
3619
3620 if (Op.isToken() && Op.getToken() == "done")
3621 continue;
3622
3623 // Handle optional arguments
3624 OptionalIdx[Op.getImmTy()] = i;
3625 }
3626
Dmitry Preobrazhensky9321e8f2017-05-19 13:36:09 +00003627 assert(SrcIdx == 4);
3628
3629 bool Compr = false;
3630 if (OptionalIdx.find(AMDGPUOperand::ImmTyExpCompr) != OptionalIdx.end()) {
3631 Compr = true;
3632 Inst.getOperand(OperandIdx[1]) = Inst.getOperand(OperandIdx[2]);
3633 Inst.getOperand(OperandIdx[2]).setReg(AMDGPU::NoRegister);
3634 Inst.getOperand(OperandIdx[3]).setReg(AMDGPU::NoRegister);
3635 }
3636
3637 for (auto i = 0; i < SrcIdx; ++i) {
3638 if (Inst.getOperand(OperandIdx[i]).getReg() != AMDGPU::NoRegister) {
3639 EnMask |= Compr? (0x3 << i * 2) : (0x1 << i);
3640 }
3641 }
3642
Matt Arsenaultbf6bdac2016-12-05 20:42:41 +00003643 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpVM);
3644 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyExpCompr);
3645
3646 Inst.addOperand(MCOperand::createImm(EnMask));
3647}
Tom Stellard45bb48e2015-06-13 03:28:10 +00003648
3649//===----------------------------------------------------------------------===//
3650// s_waitcnt
3651//===----------------------------------------------------------------------===//
3652
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003653static bool
3654encodeCnt(
3655 const AMDGPU::IsaInfo::IsaVersion ISA,
3656 int64_t &IntVal,
3657 int64_t CntVal,
3658 bool Saturate,
3659 unsigned (*encode)(const IsaInfo::IsaVersion &Version, unsigned, unsigned),
3660 unsigned (*decode)(const IsaInfo::IsaVersion &Version, unsigned))
3661{
3662 bool Failed = false;
3663
3664 IntVal = encode(ISA, IntVal, CntVal);
3665 if (CntVal != decode(ISA, IntVal)) {
3666 if (Saturate) {
3667 IntVal = encode(ISA, IntVal, -1);
3668 } else {
3669 Failed = true;
3670 }
3671 }
3672 return Failed;
3673}
3674
Tom Stellard45bb48e2015-06-13 03:28:10 +00003675bool AMDGPUAsmParser::parseCnt(int64_t &IntVal) {
3676 StringRef CntName = Parser.getTok().getString();
3677 int64_t CntVal;
3678
3679 Parser.Lex();
3680 if (getLexer().isNot(AsmToken::LParen))
3681 return true;
3682
3683 Parser.Lex();
3684 if (getLexer().isNot(AsmToken::Integer))
3685 return true;
3686
Dmitry Preobrazhensky5a2f8812017-06-07 16:08:02 +00003687 SMLoc ValLoc = Parser.getTok().getLoc();
Tom Stellard45bb48e2015-06-13 03:28:10 +00003688 if (getParser().parseAbsoluteExpression(CntVal))
3689 return true;
3690
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00003691 AMDGPU::IsaInfo::IsaVersion ISA =
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00003692 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
Tom Stellard45bb48e2015-06-13 03:28:10 +00003693
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003694 bool Failed = true;
3695 bool Sat = CntName.endswith("_sat");
3696
3697 if (CntName == "vmcnt" || CntName == "vmcnt_sat") {
3698 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeVmcnt, decodeVmcnt);
3699 } else if (CntName == "expcnt" || CntName == "expcnt_sat") {
3700 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeExpcnt, decodeExpcnt);
3701 } else if (CntName == "lgkmcnt" || CntName == "lgkmcnt_sat") {
3702 Failed = encodeCnt(ISA, IntVal, CntVal, Sat, encodeLgkmcnt, decodeLgkmcnt);
3703 }
3704
Dmitry Preobrazhensky5a2f8812017-06-07 16:08:02 +00003705 if (Failed) {
3706 Error(ValLoc, "value is too large for " + CntName);
3707 return true;
3708 }
3709
3710 if (getLexer().isNot(AsmToken::RParen)) {
3711 return true;
3712 }
3713
3714 Parser.Lex();
3715 if (getLexer().is(AsmToken::Amp) || getLexer().is(AsmToken::Comma)) {
3716 const AsmToken NextToken = getLexer().peekTok();
3717 if (NextToken.is(AsmToken::Identifier)) {
3718 Parser.Lex();
Dmitry Preobrazhensky43d297e2017-04-26 17:55:50 +00003719 }
3720 }
3721
Dmitry Preobrazhensky5a2f8812017-06-07 16:08:02 +00003722 return false;
Tom Stellard45bb48e2015-06-13 03:28:10 +00003723}
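
// Examples accepted by this parser (counter widths depend on the ISA
// version; values illustrative):
//   s_waitcnt vmcnt(0) & expcnt(0) & lgkmcnt(0)
//   s_waitcnt lgkmcnt_sat(63)   // _sat clamps an oversized value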
3724
Alex Bradbury58eba092016-11-01 16:32:05 +00003725OperandMatchResultTy
Tom Stellard45bb48e2015-06-13 03:28:10 +00003726AMDGPUAsmParser::parseSWaitCntOps(OperandVector &Operands) {
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00003727 AMDGPU::IsaInfo::IsaVersion ISA =
Konstantin Zhuravlyov972948b2017-02-27 07:55:17 +00003728 AMDGPU::IsaInfo::getIsaVersion(getFeatureBits());
Konstantin Zhuravlyov9f89ede2017-02-08 14:05:23 +00003729 int64_t Waitcnt = getWaitcntBitMask(ISA);
Tom Stellard45bb48e2015-06-13 03:28:10 +00003730 SMLoc S = Parser.getTok().getLoc();
3731
3732 switch (getLexer().getKind()) {
3733 default: return MatchOperand_ParseFail;
3734 case AsmToken::Integer:
3735 // The operand can be an integer value.
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00003736 if (getParser().parseAbsoluteExpression(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00003737 return MatchOperand_ParseFail;
3738 break;
3739
3740 case AsmToken::Identifier:
3741 do {
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00003742 if (parseCnt(Waitcnt))
Tom Stellard45bb48e2015-06-13 03:28:10 +00003743 return MatchOperand_ParseFail;
3744 } while (getLexer().isNot(AsmToken::EndOfStatement));
3745 break;
3746 }
Konstantin Zhuravlyovcdd45472016-10-11 18:58:22 +00003747 Operands.push_back(AMDGPUOperand::CreateImm(this, Waitcnt, S));
Tom Stellard45bb48e2015-06-13 03:28:10 +00003748 return MatchOperand_Success;
3749}
3750
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003751bool AMDGPUAsmParser::parseHwregConstruct(OperandInfoTy &HwReg, int64_t &Offset,
3752 int64_t &Width) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00003753 using namespace llvm::AMDGPU::Hwreg;
3754
Artem Tamazovd6468662016-04-25 14:13:51 +00003755 if (Parser.getTok().getString() != "hwreg")
3756 return true;
3757 Parser.Lex();
3758
3759 if (getLexer().isNot(AsmToken::LParen))
3760 return true;
3761 Parser.Lex();
3762
Artem Tamazov5cd55b12016-04-27 15:17:03 +00003763 if (getLexer().is(AsmToken::Identifier)) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00003764 HwReg.IsSymbolic = true;
3765 HwReg.Id = ID_UNKNOWN_;
3766 const StringRef tok = Parser.getTok().getString();
Stanislav Mekhanoshin62875fc2018-01-15 18:49:15 +00003767 int Last = ID_SYMBOLIC_LAST_;
3768 if (isSI() || isCI() || isVI())
3769 Last = ID_SYMBOLIC_FIRST_GFX9_;
3770 for (int i = ID_SYMBOLIC_FIRST_; i < Last; ++i) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00003771 if (tok == IdSymbolic[i]) {
3772 HwReg.Id = i;
3773 break;
3774 }
3775 }
Artem Tamazov5cd55b12016-04-27 15:17:03 +00003776 Parser.Lex();
3777 } else {
Artem Tamazov6edc1352016-05-26 17:00:33 +00003778 HwReg.IsSymbolic = false;
Artem Tamazov5cd55b12016-04-27 15:17:03 +00003779 if (getLexer().isNot(AsmToken::Integer))
3780 return true;
Artem Tamazov6edc1352016-05-26 17:00:33 +00003781 if (getParser().parseAbsoluteExpression(HwReg.Id))
Artem Tamazov5cd55b12016-04-27 15:17:03 +00003782 return true;
3783 }
Artem Tamazovd6468662016-04-25 14:13:51 +00003784
3785 if (getLexer().is(AsmToken::RParen)) {
3786 Parser.Lex();
3787 return false;
3788 }
3789
3790 // optional params
3791 if (getLexer().isNot(AsmToken::Comma))
3792 return true;
3793 Parser.Lex();
3794
3795 if (getLexer().isNot(AsmToken::Integer))
3796 return true;
3797 if (getParser().parseAbsoluteExpression(Offset))
3798 return true;
3799
3800 if (getLexer().isNot(AsmToken::Comma))
3801 return true;
3802 Parser.Lex();
3803
3804 if (getLexer().isNot(AsmToken::Integer))
3805 return true;
3806 if (getParser().parseAbsoluteExpression(Width))
3807 return true;
3808
3809 if (getLexer().isNot(AsmToken::RParen))
3810 return true;
3811 Parser.Lex();
3812
3813 return false;
3814}
3815
Matt Arsenaultf15da6c2017-02-03 20:49:51 +00003816OperandMatchResultTy AMDGPUAsmParser::parseHwreg(OperandVector &Operands) {
Artem Tamazov6edc1352016-05-26 17:00:33 +00003817 using namespace llvm::AMDGPU::Hwreg;
3818
Artem Tamazovd6468662016-04-25 14:13:51 +00003819 int64_t Imm16Val = 0;
3820 SMLoc S = Parser.getTok().getLoc();
3821
3822 switch (getLexer().getKind()) {
Sam Kolton11de3702016-05-24 12:38:33 +00003823 default: return MatchOperand_NoMatch;
Artem Tamazovd6468662016-04-25 14:13:51 +00003824 case AsmToken::Integer:
3825 // The operand can be an integer value.
3826 if (getParser().parseAbsoluteExpression(Imm16Val))
Artem Tamazov6edc1352016-05-26 17:00:33 +00003827 return MatchOperand_NoMatch;
3828 if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
Artem Tamazovd6468662016-04-25 14:13:51 +00003829 Error(S, "invalid immediate: only 16-bit values are legal");
3830 // Do not return error code, but create an imm operand anyway and proceed
3831 // to the next operand, if any. That avoids unnecessary error messages.
3832 }
3833 break;
3834
3835 case AsmToken::Identifier: {
Artem Tamazov6edc1352016-05-26 17:00:33 +00003836 OperandInfoTy HwReg(ID_UNKNOWN_);
3837 int64_t Offset = OFFSET_DEFAULT_;
3838 int64_t Width = WIDTH_M1_DEFAULT_ + 1;
3839 if (parseHwregConstruct(HwReg, Offset, Width))
Artem Tamazovd6468662016-04-25 14:13:51 +00003840 return MatchOperand_ParseFail;
Artem Tamazov6edc1352016-05-26 17:00:33 +00003841 if (HwReg.Id < 0 || !isUInt<ID_WIDTH_>(HwReg.Id)) {
3842 if (HwReg.IsSymbolic)
Artem Tamazov5cd55b12016-04-27 15:17:03 +00003843 Error(S, "invalid symbolic name of hardware register");
3844 else
3845 Error(S, "invalid code of hardware register: only 6-bit values are legal");
Reid Kleckner7f0ae152016-04-27 16:46:33 +00003846 }
Artem Tamazov6edc1352016-05-26 17:00:33 +00003847 if (Offset < 0 || !isUInt<OFFSET_WIDTH_>(Offset))
Artem Tamazovd6468662016-04-25 14:13:51 +00003848 Error(S, "invalid bit offset: only 5-bit values are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00003849 if ((Width-1) < 0 || !isUInt<WIDTH_M1_WIDTH_>(Width-1))
Artem Tamazovd6468662016-04-25 14:13:51 +00003850 Error(S, "invalid bitfield width: only values from 1 to 32 are legal");
Artem Tamazov6edc1352016-05-26 17:00:33 +00003851 Imm16Val = (HwReg.Id << ID_SHIFT_) | (Offset << OFFSET_SHIFT_) | ((Width-1) << WIDTH_M1_SHIFT_);
Artem Tamazovd6468662016-04-25 14:13:51 +00003852 }
3853 break;
3854 }
Sam Kolton1eeb11b2016-09-09 14:44:04 +00003855 Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTyHwreg));
Artem Tamazovd6468662016-04-25 14:13:51 +00003856 return MatchOperand_Success;
3857}
3858
Tom Stellard45bb48e2015-06-13 03:28:10 +00003859bool AMDGPUOperand::isSWaitCnt() const {
3860 return isImm();
3861}
3862
Artem Tamazovd6468662016-04-25 14:13:51 +00003863bool AMDGPUOperand::isHwreg() const {
3864 return isImmTy(ImmTyHwreg);
3865}
3866
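// A sketch of the "sendmsg" operand syntax handled below, assuming the
// symbolic names from SendMsg::IdSymbolic, OpGsSymbolic and OpSysSymbolic:
//
//   s_sendmsg sendmsg(MSG_INTERRUPT)
//   s_sendmsg sendmsg(MSG_GS, GS_OP_EMIT, 0)  // the stream id is optional
//   s_sendmsg sendmsg(MSG_SYSMSG, SYSMSG_OP_ECC_ERR_INTERRUPT)
//
// The message, operation and stream ids are packed into a 16-bit immediate
// by parseSendMsgOp further down.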
bool AMDGPUAsmParser::parseSendMsgConstruct(OperandInfoTy &Msg, OperandInfoTy &Operation, int64_t &StreamId) {
  using namespace llvm::AMDGPU::SendMsg;

  if (Parser.getTok().getString() != "sendmsg")
    return true;
  Parser.Lex();

  if (getLexer().isNot(AsmToken::LParen))
    return true;
  Parser.Lex();

  if (getLexer().is(AsmToken::Identifier)) {
    Msg.IsSymbolic = true;
    Msg.Id = ID_UNKNOWN_;
    const StringRef tok = Parser.getTok().getString();
    for (int i = ID_GAPS_FIRST_; i < ID_GAPS_LAST_; ++i) {
      switch(i) {
        default: continue; // Omit gaps.
        case ID_INTERRUPT: case ID_GS: case ID_GS_DONE: case ID_SYSMSG: break;
      }
      if (tok == IdSymbolic[i]) {
        Msg.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Msg.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Msg.Id))
      return true;
    if (getLexer().is(AsmToken::Integer))
      if (getParser().parseAbsoluteExpression(Msg.Id))
        Msg.Id = ID_UNKNOWN_;
  }
  if (Msg.Id == ID_UNKNOWN_) // Don't know how to parse the rest.
    return false;

  if (!(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG)) {
    if (getLexer().isNot(AsmToken::RParen))
      return true;
    Parser.Lex();
    return false;
  }

  if (getLexer().isNot(AsmToken::Comma))
    return true;
  Parser.Lex();

  assert(Msg.Id == ID_GS || Msg.Id == ID_GS_DONE || Msg.Id == ID_SYSMSG);
  Operation.Id = ID_UNKNOWN_;
  if (getLexer().is(AsmToken::Identifier)) {
    Operation.IsSymbolic = true;
    const char* const *S = (Msg.Id == ID_SYSMSG) ? OpSysSymbolic : OpGsSymbolic;
    const int F = (Msg.Id == ID_SYSMSG) ? OP_SYS_FIRST_ : OP_GS_FIRST_;
    const int L = (Msg.Id == ID_SYSMSG) ? OP_SYS_LAST_ : OP_GS_LAST_;
    const StringRef Tok = Parser.getTok().getString();
    for (int i = F; i < L; ++i) {
      if (Tok == S[i]) {
        Operation.Id = i;
        break;
      }
    }
    Parser.Lex();
  } else {
    Operation.IsSymbolic = false;
    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(Operation.Id))
      return true;
  }

  if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
    // Stream id is optional.
    if (getLexer().is(AsmToken::RParen)) {
      Parser.Lex();
      return false;
    }

    if (getLexer().isNot(AsmToken::Comma))
      return true;
    Parser.Lex();

    if (getLexer().isNot(AsmToken::Integer))
      return true;
    if (getParser().parseAbsoluteExpression(StreamId))
      return true;
  }

  if (getLexer().isNot(AsmToken::RParen))
    return true;
  Parser.Lex();
  return false;
}

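// Interpolation operands name a parameter slot (p10, p20 or p0) and an
// attribute channel. A typical use, as a sketch:
//
//   v_interp_p1_f32 v0, v1, attr0.x
//
// "attrN" selects attribute N (0..63) and the .x/.y/.z/.w suffix selects the
// channel (0..3); the two parts become separate immediate operands.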
OperandMatchResultTy AMDGPUAsmParser::parseInterpSlot(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  int Slot = StringSwitch<int>(Str)
    .Case("p10", 0)
    .Case("p20", 1)
    .Case("p0", 2)
    .Default(-1);

  SMLoc S = Parser.getTok().getLoc();
  if (Slot == -1)
    return MatchOperand_ParseFail;

  Parser.Lex();
  Operands.push_back(AMDGPUOperand::CreateImm(this, Slot, S,
                                              AMDGPUOperand::ImmTyInterpSlot));
  return MatchOperand_Success;
}

OperandMatchResultTy AMDGPUAsmParser::parseInterpAttr(OperandVector &Operands) {
  if (getLexer().getKind() != AsmToken::Identifier)
    return MatchOperand_NoMatch;

  StringRef Str = Parser.getTok().getString();
  if (!Str.startswith("attr"))
    return MatchOperand_NoMatch;

  StringRef Chan = Str.take_back(2);
  int AttrChan = StringSwitch<int>(Chan)
    .Case(".x", 0)
    .Case(".y", 1)
    .Case(".z", 2)
    .Case(".w", 3)
    .Default(-1);
  if (AttrChan == -1)
    return MatchOperand_ParseFail;

  Str = Str.drop_back(2).drop_front(4);

  uint8_t Attr;
  if (Str.getAsInteger(10, Attr))
    return MatchOperand_ParseFail;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();
  if (Attr > 63) {
    Error(S, "out of bounds attr");
    return MatchOperand_Success;
  }

  SMLoc SChan = SMLoc::getFromPointer(Chan.data());

  Operands.push_back(AMDGPUOperand::CreateImm(this, Attr, S,
                                              AMDGPUOperand::ImmTyInterpAttr));
  Operands.push_back(AMDGPUOperand::CreateImm(this, AttrChan, SChan,
                                              AMDGPUOperand::ImmTyAttrChan));
  return MatchOperand_Success;
}

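// Export target encoding recovered by parseExpTgtImpl below: mrt0..mrt7 map
// to 0..7, mrtz to 8, null to 9, pos0..pos3 to 12..15 and param0..param31
// to 32..63. For example (a sketch):
//
//   exp mrt0 v0, v1, v2, v3
//   exp pos0 v4, v5, v6, v7
//   exp param0 v8, v8, v8, v8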
void AMDGPUAsmParser::errorExpTgt() {
  Error(Parser.getTok().getLoc(), "invalid exp target");
}

OperandMatchResultTy AMDGPUAsmParser::parseExpTgtImpl(StringRef Str,
                                                      uint8_t &Val) {
  if (Str == "null") {
    Val = 9;
    return MatchOperand_Success;
  }

  if (Str.startswith("mrt")) {
    Str = Str.drop_front(3);
    if (Str == "z") { // == mrtz
      Val = 8;
      return MatchOperand_Success;
    }

    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 7)
      errorExpTgt();

    return MatchOperand_Success;
  }

  if (Str.startswith("pos")) {
    Str = Str.drop_front(3);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val > 3)
      errorExpTgt();

    Val += 12;
    return MatchOperand_Success;
  }

  if (Str.startswith("param")) {
    Str = Str.drop_front(5);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    if (Val >= 32)
      errorExpTgt();

    Val += 32;
    return MatchOperand_Success;
  }

  if (Str.startswith("invalid_target_")) {
    Str = Str.drop_front(15);
    if (Str.getAsInteger(10, Val))
      return MatchOperand_ParseFail;

    errorExpTgt();
    return MatchOperand_Success;
  }

  return MatchOperand_NoMatch;
}

OperandMatchResultTy AMDGPUAsmParser::parseExpTgt(OperandVector &Operands) {
  uint8_t Val;
  StringRef Str = Parser.getTok().getString();

  auto Res = parseExpTgtImpl(Str, Val);
  if (Res != MatchOperand_Success)
    return Res;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex();

  Operands.push_back(AMDGPUOperand::CreateImm(this, Val, S,
                                              AMDGPUOperand::ImmTyExpTgt));
  return MatchOperand_Success;
}

OperandMatchResultTy
AMDGPUAsmParser::parseSendMsgOp(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SendMsg;

  int64_t Imm16Val = 0;
  SMLoc S = Parser.getTok().getLoc();

  switch(getLexer().getKind()) {
  default:
    return MatchOperand_NoMatch;
  case AsmToken::Integer:
    // The operand can be an integer value.
    if (getParser().parseAbsoluteExpression(Imm16Val))
      return MatchOperand_NoMatch;
    if (Imm16Val < 0 || !isUInt<16>(Imm16Val)) {
      Error(S, "invalid immediate: only 16-bit values are legal");
      // Do not return error code, but create an imm operand anyway and proceed
      // to the next operand, if any. That avoids unnecessary error messages.
    }
    break;
  case AsmToken::Identifier: {
      OperandInfoTy Msg(ID_UNKNOWN_);
      OperandInfoTy Operation(OP_UNKNOWN_);
      int64_t StreamId = STREAM_ID_DEFAULT_;
      if (parseSendMsgConstruct(Msg, Operation, StreamId))
        return MatchOperand_ParseFail;
      do {
        // Validate and encode message ID.
        if (! ((ID_INTERRUPT <= Msg.Id && Msg.Id <= ID_GS_DONE)
                || Msg.Id == ID_SYSMSG)) {
          if (Msg.IsSymbolic)
            Error(S, "invalid/unsupported symbolic name of message");
          else
            Error(S, "invalid/unsupported code of message");
          break;
        }
        Imm16Val = (Msg.Id << ID_SHIFT_);
        // Validate and encode operation ID.
        if (Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) {
          if (! (OP_GS_FIRST_ <= Operation.Id && Operation.Id < OP_GS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid symbolic name of GS_OP");
            else
              Error(S, "invalid code of GS_OP: only 2-bit values are legal");
            break;
          }
          if (Operation.Id == OP_GS_NOP
              && Msg.Id != ID_GS_DONE) {
            Error(S, "invalid GS_OP: NOP is for GS_DONE only");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        if (Msg.Id == ID_SYSMSG) {
          if (! (OP_SYS_FIRST_ <= Operation.Id && Operation.Id < OP_SYS_LAST_)) {
            if (Operation.IsSymbolic)
              Error(S, "invalid/unsupported symbolic name of SYSMSG_OP");
            else
              Error(S, "invalid/unsupported code of SYSMSG_OP");
            break;
          }
          Imm16Val |= (Operation.Id << OP_SHIFT_);
        }
        // Validate and encode stream ID.
        if ((Msg.Id == ID_GS || Msg.Id == ID_GS_DONE) && Operation.Id != OP_GS_NOP) {
          if (! (STREAM_ID_FIRST_ <= StreamId && StreamId < STREAM_ID_LAST_)) {
            Error(S, "invalid stream id: only 2-bit values are legal");
            break;
          }
          Imm16Val |= (StreamId << STREAM_ID_SHIFT_);
        }
      } while (false);
    }
    break;
  }
  Operands.push_back(AMDGPUOperand::CreateImm(this, Imm16Val, S, AMDGPUOperand::ImmTySendMsg));
  return MatchOperand_Success;
}

bool AMDGPUOperand::isSendMsg() const {
  return isImmTy(ImmTySendMsg);
}

//===----------------------------------------------------------------------===//
// parser helpers
//===----------------------------------------------------------------------===//

bool
AMDGPUAsmParser::trySkipId(const StringRef Id) {
  if (getLexer().getKind() == AsmToken::Identifier &&
      Parser.getTok().getString() == Id) {
    Parser.Lex();
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::trySkipToken(const AsmToken::TokenKind Kind) {
  if (getLexer().getKind() == Kind) {
    Parser.Lex();
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::skipToken(const AsmToken::TokenKind Kind,
                           const StringRef ErrMsg) {
  if (!trySkipToken(Kind)) {
    Error(Parser.getTok().getLoc(), ErrMsg);
    return false;
  }
  return true;
}

bool
AMDGPUAsmParser::parseExpr(int64_t &Imm) {
  return !getParser().parseAbsoluteExpression(Imm);
}

bool
AMDGPUAsmParser::parseString(StringRef &Val, const StringRef ErrMsg) {
  SMLoc S = Parser.getTok().getLoc();
  if (getLexer().getKind() == AsmToken::String) {
    Val = Parser.getTok().getStringContents();
    Parser.Lex();
    return true;
  } else {
    Error(S, ErrMsg);
    return false;
  }
}

//===----------------------------------------------------------------------===//
// swizzle
//===----------------------------------------------------------------------===//

LLVM_READNONE
static unsigned
encodeBitmaskPerm(const unsigned AndMask,
                  const unsigned OrMask,
                  const unsigned XorMask) {
  using namespace llvm::AMDGPU::Swizzle;

  return BITMASK_PERM_ENC |
         (AndMask << BITMASK_AND_SHIFT) |
         (OrMask << BITMASK_OR_SHIFT) |
         (XorMask << BITMASK_XOR_SHIFT);
}

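// How a bitmask swizzle acts on lane ids, as a sketch: each output lane
// reads from lane ((id & AndMask) | OrMask) ^ XorMask. For instance,
// parseSwizzleBroadcast below encodes a broadcast over groups of GroupSize
// lanes as AndMask = BITMASK_MAX - GroupSize + 1 (keep the group base bits),
// OrMask = LaneIdx (select one lane within the group) and XorMask = 0.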
bool
AMDGPUAsmParser::parseSwizzleOperands(const unsigned OpNum, int64_t* Op,
                                      const unsigned MinVal,
                                      const unsigned MaxVal,
                                      const StringRef ErrMsg) {
  for (unsigned i = 0; i < OpNum; ++i) {
    if (!skipToken(AsmToken::Comma, "expected a comma")) {
      return false;
    }
    SMLoc ExprLoc = Parser.getTok().getLoc();
    if (!parseExpr(Op[i])) {
      return false;
    }
    if (Op[i] < MinVal || Op[i] > MaxVal) {
      Error(ExprLoc, ErrMsg);
      return false;
    }
  }

  return true;
}

bool
AMDGPUAsmParser::parseSwizzleQuadPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  int64_t Lane[LANE_NUM];
  if (parseSwizzleOperands(LANE_NUM, Lane, 0, LANE_MAX,
                           "expected a 2-bit lane id")) {
    Imm = QUAD_PERM_ENC;
    for (auto i = 0; i < LANE_NUM; ++i) {
      Imm |= Lane[i] << (LANE_SHIFT * i);
    }
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::parseSwizzleBroadcast(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;
  int64_t LaneIdx;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32,
                            "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }
  if (parseSwizzleOperands(1, &LaneIdx,
                           0, GroupSize - 1,
                           "lane id must be in the interval [0,group size - 1]")) {
    Imm = encodeBitmaskPerm(BITMASK_MAX - GroupSize + 1, LaneIdx, 0);
    return true;
  }
  return false;
}

bool
AMDGPUAsmParser::parseSwizzleReverse(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;

  if (!parseSwizzleOperands(1, &GroupSize,
                            2, 32, "group size must be in the interval [2,32]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }

  Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize - 1);
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleSwap(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  SMLoc S = Parser.getTok().getLoc();
  int64_t GroupSize;

  if (!parseSwizzleOperands(1, &GroupSize,
                            1, 16, "group size must be in the interval [1,16]")) {
    return false;
  }
  if (!isPowerOf2_64(GroupSize)) {
    Error(S, "group size must be a power of two");
    return false;
  }

  Imm = encodeBitmaskPerm(BITMASK_MAX, 0, GroupSize);
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleBitmaskPerm(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (!skipToken(AsmToken::Comma, "expected a comma")) {
    return false;
  }

  StringRef Ctl;
  SMLoc StrLoc = Parser.getTok().getLoc();
  if (!parseString(Ctl)) {
    return false;
  }
  if (Ctl.size() != BITMASK_WIDTH) {
    Error(StrLoc, "expected a 5-character mask");
    return false;
  }

  unsigned AndMask = 0;
  unsigned OrMask = 0;
  unsigned XorMask = 0;

  for (size_t i = 0; i < Ctl.size(); ++i) {
    unsigned Mask = 1 << (BITMASK_WIDTH - 1 - i);
    switch(Ctl[i]) {
    default:
      Error(StrLoc, "invalid mask");
      return false;
    case '0':
      break;
    case '1':
      OrMask |= Mask;
      break;
    case 'p':
      AndMask |= Mask;
      break;
    case 'i':
      AndMask |= Mask;
      XorMask |= Mask;
      break;
    }
  }

  Imm = encodeBitmaskPerm(AndMask, OrMask, XorMask);
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleOffset(int64_t &Imm) {
  SMLoc OffsetLoc = Parser.getTok().getLoc();

  if (!parseExpr(Imm)) {
    return false;
  }
  if (!isUInt<16>(Imm)) {
    Error(OffsetLoc, "expected a 16-bit offset");
    return false;
  }
  return true;
}

bool
AMDGPUAsmParser::parseSwizzleMacro(int64_t &Imm) {
  using namespace llvm::AMDGPU::Swizzle;

  if (skipToken(AsmToken::LParen, "expected a left parenthesis")) {
    SMLoc ModeLoc = Parser.getTok().getLoc();
    bool Ok = false;

    if (trySkipId(IdSymbolic[ID_QUAD_PERM])) {
      Ok = parseSwizzleQuadPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BITMASK_PERM])) {
      Ok = parseSwizzleBitmaskPerm(Imm);
    } else if (trySkipId(IdSymbolic[ID_BROADCAST])) {
      Ok = parseSwizzleBroadcast(Imm);
    } else if (trySkipId(IdSymbolic[ID_SWAP])) {
      Ok = parseSwizzleSwap(Imm);
    } else if (trySkipId(IdSymbolic[ID_REVERSE])) {
      Ok = parseSwizzleReverse(Imm);
    } else {
      Error(ModeLoc, "expected a swizzle mode");
    }

    return Ok && skipToken(AsmToken::RParen, "expected a closing parenthesis");
  }

  return false;
}

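// The "offset" operand of ds_swizzle_b32 may be given either as a raw 16-bit
// value or via the swizzle() macros parsed above. A sketch, assuming the mode
// names from Swizzle::IdSymbolic:
//
//   ds_swizzle_b32 v5, v1 offset:swizzle(QUAD_PERM, 0, 1, 2, 3)
//   ds_swizzle_b32 v5, v1 offset:swizzle(BROADCAST, 8, 3)
//   ds_swizzle_b32 v5, v1 offset:swizzle(BITMASK_PERM, "01pi0")
//   ds_swizzle_b32 v5, v1 offset:0xffff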
OperandMatchResultTy
AMDGPUAsmParser::parseSwizzleOp(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  int64_t Imm = 0;

  if (trySkipId("offset")) {
    bool Ok = false;
    if (skipToken(AsmToken::Colon, "expected a colon")) {
      if (trySkipId("swizzle")) {
        Ok = parseSwizzleMacro(Imm);
      } else {
        Ok = parseSwizzleOffset(Imm);
      }
    }

    Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S, AMDGPUOperand::ImmTySwizzle));

    return Ok ? MatchOperand_Success : MatchOperand_ParseFail;
  } else {
    // Swizzle "offset" operand is optional.
    // If it is omitted, try parsing other optional operands.
    return parseOptionalOpr(Operands);
  }
}

bool
AMDGPUOperand::isSwizzle() const {
  return isImmTy(ImmTySwizzle);
}

//===----------------------------------------------------------------------===//
// sopp branch targets
//===----------------------------------------------------------------------===//

OperandMatchResultTy
AMDGPUAsmParser::parseSOppBrTarget(OperandVector &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  switch (getLexer().getKind()) {
    default: return MatchOperand_ParseFail;
    case AsmToken::Integer: {
      int64_t Imm;
      if (getParser().parseAbsoluteExpression(Imm))
        return MatchOperand_ParseFail;
      Operands.push_back(AMDGPUOperand::CreateImm(this, Imm, S));
      return MatchOperand_Success;
    }

    case AsmToken::Identifier:
      Operands.push_back(AMDGPUOperand::CreateExpr(this,
          MCSymbolRefExpr::create(getContext().getOrCreateSymbol(
                Parser.getTok().getString()), getContext()), S));
      Parser.Lex();
      return MatchOperand_Success;
  }
}

//===----------------------------------------------------------------------===//
// mubuf
//===----------------------------------------------------------------------===//

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultGLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyGLC);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSLC() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTySLC);
}

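// A typical MUBUF instruction as consumed by the converter below (a sketch;
// the modifier spellings come from AMDGPUOptionalOperandTable):
//
//   buffer_load_dword v5, v0, s[8:11], s3 offen offset:4095 glc slc
//
// Register and token operands are added first; the optional modifiers are
// then appended from OptionalIdx in the fixed order the MCInst expects.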
void AMDGPUAsmParser::cvtMubufImpl(MCInst &Inst,
                                   const OperandVector &Operands,
                                   bool IsAtomic,
                                   bool IsAtomicReturn,
                                   bool IsLds) {
  bool IsLdsOpcode = IsLds;
  bool HasLdsModifier = false;
  OptionalImmIndexMap OptionalIdx;
  assert(IsAtomicReturn ? IsAtomic : true);

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    HasLdsModifier |= Op.isLDS();

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  // This is a workaround for an llvm quirk which may result in an
  // incorrect instruction selection. Lds and non-lds versions of
  // MUBUF instructions are identical except that lds versions
  // have mandatory 'lds' modifier. However this modifier follows
  // optional modifiers and llvm asm matcher regards this 'lds'
  // modifier as an optional one. As a result, an lds version
  // of opcode may be selected even if it has no 'lds' modifier.
  if (IsLdsOpcode && !HasLdsModifier) {
    int NoLdsOpcode = AMDGPU::getMUBUFNoLdsInst(Inst.getOpcode());
    if (NoLdsOpcode != -1) { // Got lds version - correct it.
      Inst.setOpcode(NoLdsOpcode);
      IsLdsOpcode = false;
    }
  }

  // Copy $vdata_in operand and insert as $vdata for MUBUF_Atomic RTN insns.
  if (IsAtomicReturn) {
    MCInst::iterator I = Inst.begin(); // $vdata_in is always at the beginning.
    Inst.insert(I, *I);
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOffset);
  if (!IsAtomic) { // glc is hard-coded.
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  }
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);

  if (!IsLdsOpcode) { // tfe is not legal with lds opcodes
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  }
}

void AMDGPUAsmParser::cvtMtbuf(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  for (unsigned i = 1, e = Operands.size(); i != e; ++i) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[i]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
      continue;
    }

    // Handle the case where soffset is an immediate
    if (Op.isImm() && Op.getImmTy() == AMDGPUOperand::ImmTyNone) {
      Op.addImmOperands(Inst, 1);
      continue;
    }

    // Handle tokens like 'offen' which are sometimes hard-coded into the
    // asm string. There are no MCInst operands for these.
    if (Op.isToken()) {
      continue;
    }
    assert(Op.isImm());

    // Handle optional arguments
    OptionalIdx[Op.getImmTy()] = i;
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx,
                        AMDGPUOperand::ImmTyOffset);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDFMT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyNFMT);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
}

//===----------------------------------------------------------------------===//
// mimg
//===----------------------------------------------------------------------===//

void AMDGPUAsmParser::cvtMIMG(MCInst &Inst, const OperandVector &Operands,
                              bool IsAtomic) {
  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (IsAtomic) {
    // Add src, same as dst
    assert(Desc.getNumDefs() == 1);
    ((AMDGPUOperand &)*Operands[I - 1]).addRegOperands(Inst, 1);
  }

  OptionalImmIndexMap OptionalIdx;

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);

    // Add the register arguments
    if (Op.isReg()) {
      Op.addRegOperands(Inst, 1);
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unexpected operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDMask);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyUNorm);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyGLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySLC);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyR128);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyTFE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyLWE);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDA);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyD16);
}

void AMDGPUAsmParser::cvtMIMGAtomic(MCInst &Inst, const OperandVector &Operands) {
  cvtMIMG(Inst, Operands, true);
}

//===----------------------------------------------------------------------===//
// smrd
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isSMRDOffset8() const {
  return isImm() && isUInt<8>(getImm());
}

bool AMDGPUOperand::isSMRDOffset20() const {
  return isImm() && isUInt<20>(getImm());
}

bool AMDGPUOperand::isSMRDLiteralOffset() const {
  // 32-bit literals are only supported on CI and we only want to use them
  // when the offset is > 8-bits.
  return isImm() && !isUInt<8>(getImm()) && isUInt<32>(getImm());
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset8() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDOffset20() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultSMRDLiteralOffset() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetU12() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultOffsetS13() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyOffset);
}

//===----------------------------------------------------------------------===//
// vop3
//===----------------------------------------------------------------------===//

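// VOP3 output modifiers are spelled mul:2, mul:4 or div:2 and live in a
// 2-bit omod field (0 = none, 1 = *2, 2 = *4, 3 = /2); the converters below
// map the spelled value onto that hardware encoding.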
static bool ConvertOmodMul(int64_t &Mul) {
  if (Mul != 1 && Mul != 2 && Mul != 4)
    return false;

  Mul >>= 1;
  return true;
}

static bool ConvertOmodDiv(int64_t &Div) {
  if (Div == 1) {
    Div = 0;
    return true;
  }

  if (Div == 2) {
    Div = 3;
    return true;
  }

  return false;
}

static bool ConvertBoundCtrl(int64_t &BoundCtrl) {
  if (BoundCtrl == 0) {
    BoundCtrl = 1;
    return true;
  }

  if (BoundCtrl == -1) {
    BoundCtrl = 0;
    return true;
  }

  return false;
}

// Note: the order in this table matches the order of operands in AsmString.
static const OptionalOperand AMDGPUOptionalOperandTable[] = {
  {"offen",   AMDGPUOperand::ImmTyOffen, true, nullptr},
  {"idxen",   AMDGPUOperand::ImmTyIdxen, true, nullptr},
  {"addr64",  AMDGPUOperand::ImmTyAddr64, true, nullptr},
  {"offset0", AMDGPUOperand::ImmTyOffset0, false, nullptr},
  {"offset1", AMDGPUOperand::ImmTyOffset1, false, nullptr},
  {"gds",     AMDGPUOperand::ImmTyGDS, true, nullptr},
  {"lds",     AMDGPUOperand::ImmTyLDS, true, nullptr},
  {"offset",  AMDGPUOperand::ImmTyOffset, false, nullptr},
  {"inst_offset", AMDGPUOperand::ImmTyInstOffset, false, nullptr},
  {"dfmt",    AMDGPUOperand::ImmTyDFMT, false, nullptr},
  {"nfmt",    AMDGPUOperand::ImmTyNFMT, false, nullptr},
  {"glc",     AMDGPUOperand::ImmTyGLC, true, nullptr},
  {"slc",     AMDGPUOperand::ImmTySLC, true, nullptr},
  {"tfe",     AMDGPUOperand::ImmTyTFE, true, nullptr},
  {"d16",     AMDGPUOperand::ImmTyD16, true, nullptr},
  {"high",    AMDGPUOperand::ImmTyHigh, true, nullptr},
  {"clamp",   AMDGPUOperand::ImmTyClampSI, true, nullptr},
  {"omod",    AMDGPUOperand::ImmTyOModSI, false, ConvertOmodMul},
  {"unorm",   AMDGPUOperand::ImmTyUNorm, true, nullptr},
  {"da",      AMDGPUOperand::ImmTyDA, true, nullptr},
  {"r128",    AMDGPUOperand::ImmTyR128, true, nullptr},
  {"lwe",     AMDGPUOperand::ImmTyLWE, true, nullptr},
  {"dmask",   AMDGPUOperand::ImmTyDMask, false, nullptr},
  {"row_mask",   AMDGPUOperand::ImmTyDppRowMask, false, nullptr},
  {"bank_mask",  AMDGPUOperand::ImmTyDppBankMask, false, nullptr},
  {"bound_ctrl", AMDGPUOperand::ImmTyDppBoundCtrl, false, ConvertBoundCtrl},
  {"dst_sel",    AMDGPUOperand::ImmTySdwaDstSel, false, nullptr},
  {"src0_sel",   AMDGPUOperand::ImmTySdwaSrc0Sel, false, nullptr},
  {"src1_sel",   AMDGPUOperand::ImmTySdwaSrc1Sel, false, nullptr},
  {"dst_unused", AMDGPUOperand::ImmTySdwaDstUnused, false, nullptr},
  {"compr",   AMDGPUOperand::ImmTyExpCompr, true, nullptr},
  {"vm",      AMDGPUOperand::ImmTyExpVM, true, nullptr},
  {"op_sel",    AMDGPUOperand::ImmTyOpSel, false, nullptr},
  {"op_sel_hi", AMDGPUOperand::ImmTyOpSelHi, false, nullptr},
  {"neg_lo",    AMDGPUOperand::ImmTyNegLo, false, nullptr},
  {"neg_hi",    AMDGPUOperand::ImmTyNegHi, false, nullptr}
};

OperandMatchResultTy AMDGPUAsmParser::parseOptionalOperand(OperandVector &Operands) {
  unsigned size = Operands.size();
  assert(size > 0);

  OperandMatchResultTy res = parseOptionalOpr(Operands);

  // This is a hack to enable hardcoded mandatory operands which follow
  // optional operands.
  //
  // Current design assumes that all operands after the first optional operand
  // are also optional. However implementation of some instructions violates
  // this rule (see e.g. flat/global atomic which have hardcoded 'glc' operands).
  //
  // To alleviate this problem, we have to (implicitly) parse extra operands
  // to make sure autogenerated parser of custom operands never hit hardcoded
  // mandatory operands.

  if (size == 1 || ((AMDGPUOperand &)*Operands[size - 1]).isRegKind()) {
    // We have parsed the first optional operand.
    // Parse as many operands as necessary to skip all mandatory operands.

    for (unsigned i = 0; i < MAX_OPR_LOOKAHEAD; ++i) {
      if (res != MatchOperand_Success ||
          getLexer().is(AsmToken::EndOfStatement)) break;
      if (getLexer().is(AsmToken::Comma)) Parser.Lex();
      res = parseOptionalOpr(Operands);
    }
  }

  return res;
}

OperandMatchResultTy AMDGPUAsmParser::parseOptionalOpr(OperandVector &Operands) {
  OperandMatchResultTy res;
  for (const OptionalOperand &Op : AMDGPUOptionalOperandTable) {
    // try to parse any optional operand here
    if (Op.IsBit) {
      res = parseNamedBit(Op.Name, Operands, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTyOModSI) {
      res = parseOModOperand(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstSel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc0Sel ||
               Op.Type == AMDGPUOperand::ImmTySdwaSrc1Sel) {
      res = parseSDWASel(Operands, Op.Name, Op.Type);
    } else if (Op.Type == AMDGPUOperand::ImmTySdwaDstUnused) {
      res = parseSDWADstUnused(Operands);
    } else if (Op.Type == AMDGPUOperand::ImmTyOpSel ||
               Op.Type == AMDGPUOperand::ImmTyOpSelHi ||
               Op.Type == AMDGPUOperand::ImmTyNegLo ||
               Op.Type == AMDGPUOperand::ImmTyNegHi) {
      res = parseOperandArrayWithPrefix(Op.Name, Operands, Op.Type,
                                        Op.ConvertResult);
    } else {
      res = parseIntWithPrefix(Op.Name, Operands, Op.Type, Op.ConvertResult);
    }
    if (res != MatchOperand_NoMatch) {
      return res;
    }
  }
  return MatchOperand_NoMatch;
}

OperandMatchResultTy AMDGPUAsmParser::parseOModOperand(OperandVector &Operands) {
  StringRef Name = Parser.getTok().getString();
  if (Name == "mul") {
    return parseIntWithPrefix("mul", Operands,
                              AMDGPUOperand::ImmTyOModSI, ConvertOmodMul);
  }

  if (Name == "div") {
    return parseIntWithPrefix("div", Operands,
                              AMDGPUOperand::ImmTyOModSI, ConvertOmodDiv);
  }

  return MatchOperand_NoMatch;
}

void AMDGPUAsmParser::cvtVOP3OpSel(MCInst &Inst, const OperandVector &Operands) {
  cvtVOP3P(Inst, Operands);

  int Opc = Inst.getOpcode();

  int SrcNum;
  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  for (SrcNum = 0;
       SrcNum < 3 && AMDGPU::getNamedOperandIdx(Opc, Ops[SrcNum]) != -1;
       ++SrcNum);
  assert(SrcNum > 0);

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);
  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();

  if ((OpSel & (1 << SrcNum)) != 0) {
    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers);
    uint32_t ModVal = Inst.getOperand(ModIdx).getImm();
    Inst.getOperand(ModIdx).setImm(ModVal | SISrcMods::DST_OP_SEL);
  }
}

static bool isRegOrImmWithInputMods(const MCInstrDesc &Desc, unsigned OpNum) {
  // 1. This operand is input modifiers
  return Desc.OpInfo[OpNum].OperandType == AMDGPU::OPERAND_INPUT_MODS
  // 2. This is not last operand
      && Desc.NumOperands > (OpNum + 1)
  // 3. Next operand is register class
      && Desc.OpInfo[OpNum + 1].RegClass != -1
  // 4. Next register is not tied to any other operand
      && Desc.getOperandConstraint(OpNum + 1, MCOI::OperandConstraint::TIED_TO) == -1;
}

void AMDGPUAsmParser::cvtVOP3Interp(MCInst &Inst, const OperandVector &Operands)
{
  OptionalImmIndexMap OptionalIdx;
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
    } else if (Op.isInterpSlot() ||
               Op.isInterpAttr() ||
               Op.isAttrChan()) {
      Inst.addOperand(MCOperand::createImm(Op.Imm.Val));
    } else if (Op.isImmModifier()) {
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("unhandled operand type");
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::high) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyHigh);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands,
                              OptionalImmIndexMap &OptionalIdx) {
  unsigned Opc = Inst.getOpcode();

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src0_modifiers) != -1) {
    // This instruction has src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
        Op.addRegOrImmWithFPInputModsOperands(Inst, 2);
      } else if (Op.isImmModifier()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else if (Op.isRegOrImm()) {
        Op.addRegOrImmOperands(Inst, 1);
      } else {
        llvm_unreachable("unhandled operand type");
      }
    }
  } else {
    // No src modifiers
    for (unsigned E = Operands.size(); I != E; ++I) {
      AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
      if (Op.isMod()) {
        OptionalIdx[Op.getImmTy()] = I;
      } else {
        Op.addRegOrImmOperands(Inst, 1);
      }
    }
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::clamp) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI);
  }

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::omod) != -1) {
    addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI);
  }

  // Special case v_mac_{f16, f32} and v_fmac_f32 (gfx906): they have a src2
  // register operand that is tied to the dst operand; we don't allow
  // modifiers for this operand in the assembler, so src2_modifiers should
  // be 0.
  if (Opc == AMDGPU::V_MAC_F32_e64_si ||
      Opc == AMDGPU::V_MAC_F32_e64_vi ||
      Opc == AMDGPU::V_MAC_F16_e64_vi ||
      Opc == AMDGPU::V_FMAC_F32_e64_vi) {
    auto it = Inst.begin();
    std::advance(it, AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::src2_modifiers));
    it = Inst.insert(it, MCOperand::createImm(0)); // no modifiers for src2
    ++it;
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}

void AMDGPUAsmParser::cvtVOP3(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;
  cvtVOP3(Inst, Operands, OptionalIdx);
}

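// Packed-math (VOP3P) modifiers as consumed by cvtVOP3P below, e.g.
// (a sketch):
//
//   v_pk_add_f16 v1, v2, v3 op_sel:[0,1] op_sel_hi:[1,0] neg_lo:[1,0]
//
// Each list supplies one bit per source operand; the bits are folded into
// the per-source src_modifiers (OP_SEL_0, OP_SEL_1, NEG, NEG_HI) at the end
// of the function.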
void AMDGPUAsmParser::cvtVOP3P(MCInst &Inst,
                               const OperandVector &Operands) {
  OptionalImmIndexMap OptIdx;
  const int Opc = Inst.getOpcode();
  const MCInstrDesc &Desc = MII.get(Opc);

  const bool IsPacked = (Desc.TSFlags & SIInstrFlags::IsPacked) != 0;

  cvtVOP3(Inst, Operands, OptIdx);

  if (AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::vdst_in) != -1) {
    assert(!IsPacked);
    Inst.addOperand(Inst.getOperand(0));
  }

  // FIXME: This is messy. Parse the modifiers as if it was a normal VOP3
  // instruction, and then figure out where to actually put the modifiers.

  addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSel);

  int OpSelHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel_hi);
  if (OpSelHiIdx != -1) {
    int DefaultVal = IsPacked ? -1 : 0;
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyOpSelHi,
                          DefaultVal);
  }

  int NegLoIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_lo);
  if (NegLoIdx != -1) {
    assert(IsPacked);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegLo);
    addOptionalImmOperand(Inst, Operands, OptIdx, AMDGPUOperand::ImmTyNegHi);
  }

  const int Ops[] = { AMDGPU::OpName::src0,
                      AMDGPU::OpName::src1,
                      AMDGPU::OpName::src2 };
  const int ModOps[] = { AMDGPU::OpName::src0_modifiers,
                         AMDGPU::OpName::src1_modifiers,
                         AMDGPU::OpName::src2_modifiers };

  int OpSelIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::op_sel);

  unsigned OpSel = Inst.getOperand(OpSelIdx).getImm();
  unsigned OpSelHi = 0;
  unsigned NegLo = 0;
  unsigned NegHi = 0;

  if (OpSelHiIdx != -1) {
    OpSelHi = Inst.getOperand(OpSelHiIdx).getImm();
  }

  if (NegLoIdx != -1) {
    int NegHiIdx = AMDGPU::getNamedOperandIdx(Opc, AMDGPU::OpName::neg_hi);
    NegLo = Inst.getOperand(NegLoIdx).getImm();
    NegHi = Inst.getOperand(NegHiIdx).getImm();
  }

  for (int J = 0; J < 3; ++J) {
    int OpIdx = AMDGPU::getNamedOperandIdx(Opc, Ops[J]);
    if (OpIdx == -1)
      break;

    uint32_t ModVal = 0;

    if ((OpSel & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_0;

    if ((OpSelHi & (1 << J)) != 0)
      ModVal |= SISrcMods::OP_SEL_1;

    if ((NegLo & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG;

    if ((NegHi & (1 << J)) != 0)
      ModVal |= SISrcMods::NEG_HI;

    int ModIdx = AMDGPU::getNamedOperandIdx(Opc, ModOps[J]);

    Inst.getOperand(ModIdx).setImm(Inst.getOperand(ModIdx).getImm() | ModVal);
  }
}

//===----------------------------------------------------------------------===//
// dpp
//===----------------------------------------------------------------------===//

bool AMDGPUOperand::isDPPCtrl() const {
  using namespace AMDGPU::DPP;

  bool result = isImm() && getImmTy() == ImmTyDppCtrl && isUInt<9>(getImm());
  if (result) {
    int64_t Imm = getImm();
    return (Imm >= DppCtrl::QUAD_PERM_FIRST && Imm <= DppCtrl::QUAD_PERM_LAST) ||
           (Imm >= DppCtrl::ROW_SHL_FIRST && Imm <= DppCtrl::ROW_SHL_LAST) ||
           (Imm >= DppCtrl::ROW_SHR_FIRST && Imm <= DppCtrl::ROW_SHR_LAST) ||
           (Imm >= DppCtrl::ROW_ROR_FIRST && Imm <= DppCtrl::ROW_ROR_LAST) ||
           (Imm == DppCtrl::WAVE_SHL1) ||
           (Imm == DppCtrl::WAVE_ROL1) ||
           (Imm == DppCtrl::WAVE_SHR1) ||
           (Imm == DppCtrl::WAVE_ROR1) ||
           (Imm == DppCtrl::ROW_MIRROR) ||
           (Imm == DppCtrl::ROW_HALF_MIRROR) ||
           (Imm == DppCtrl::BCAST15) ||
           (Imm == DppCtrl::BCAST31);
  }
  return false;
}

bool AMDGPUOperand::isGPRIdxMode() const {
  return isImm() && isUInt<4>(getImm());
}

bool AMDGPUOperand::isS16Imm() const {
  return isImm() && (isInt<16>(getImm()) || isUInt<16>(getImm()));
}

bool AMDGPUOperand::isU16Imm() const {
  return isImm() && isUInt<16>(getImm());
}

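// dpp_ctrl spellings accepted by parseDPPCtrl below, as a sketch:
//
//   quad_perm:[0,1,2,3]
//   row_shl:1 .. row_shl:15    (likewise row_shr and row_ror)
//   wave_shl:1  wave_rol:1  wave_shr:1  wave_ror:1
//   row_mirror  row_half_mirror
//   row_bcast:15  row_bcast:31
//
// Each form is translated to the corresponding DppCtrl enum encoding.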
Alex Bradbury58eba092016-11-01 16:32:05 +00005132OperandMatchResultTy
Sam Kolton11de3702016-05-24 12:38:33 +00005133AMDGPUAsmParser::parseDPPCtrl(OperandVector &Operands) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005134 using namespace AMDGPU::DPP;
5135
Sam Koltondfa29f72016-03-09 12:29:31 +00005136 SMLoc S = Parser.getTok().getLoc();
5137 StringRef Prefix;
5138 int64_t Int;
Sam Koltondfa29f72016-03-09 12:29:31 +00005139
Sam Koltona74cd522016-03-18 15:35:51 +00005140 if (getLexer().getKind() == AsmToken::Identifier) {
5141 Prefix = Parser.getTok().getString();
5142 } else {
5143 return MatchOperand_NoMatch;
5144 }
5145
5146 if (Prefix == "row_mirror") {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005147 Int = DppCtrl::ROW_MIRROR;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005148 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005149 } else if (Prefix == "row_half_mirror") {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005150 Int = DppCtrl::ROW_HALF_MIRROR;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005151 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005152 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00005153 // Check to prevent parseDPPCtrlOps from eating invalid tokens
5154 if (Prefix != "quad_perm"
5155 && Prefix != "row_shl"
5156 && Prefix != "row_shr"
5157 && Prefix != "row_ror"
5158 && Prefix != "wave_shl"
5159 && Prefix != "wave_rol"
5160 && Prefix != "wave_shr"
5161 && Prefix != "wave_ror"
5162 && Prefix != "row_bcast") {
Sam Kolton11de3702016-05-24 12:38:33 +00005163 return MatchOperand_NoMatch;
Sam Kolton201398e2016-04-21 13:14:24 +00005164 }
5165
Sam Koltona74cd522016-03-18 15:35:51 +00005166 Parser.Lex();
5167 if (getLexer().isNot(AsmToken::Colon))
5168 return MatchOperand_ParseFail;
5169
5170 if (Prefix == "quad_perm") {
5171 // quad_perm:[%d,%d,%d,%d]
Sam Koltondfa29f72016-03-09 12:29:31 +00005172 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005173 if (getLexer().isNot(AsmToken::LBrac))
Sam Koltondfa29f72016-03-09 12:29:31 +00005174 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005175 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00005176
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005177 if (getParser().parseAbsoluteExpression(Int) || !(0 <= Int && Int <=3))
Sam Koltondfa29f72016-03-09 12:29:31 +00005178 return MatchOperand_ParseFail;
5179
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005180 for (int i = 0; i < 3; ++i) {
5181 if (getLexer().isNot(AsmToken::Comma))
5182 return MatchOperand_ParseFail;
5183 Parser.Lex();
Sam Koltondfa29f72016-03-09 12:29:31 +00005184
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005185 int64_t Temp;
5186 if (getParser().parseAbsoluteExpression(Temp) || !(0 <= Temp && Temp <=3))
5187 return MatchOperand_ParseFail;
5188 const int shift = i*2 + 2;
5189 Int += (Temp << shift);
5190 }
Sam Koltona74cd522016-03-18 15:35:51 +00005191
Sam Koltona74cd522016-03-18 15:35:51 +00005192 if (getLexer().isNot(AsmToken::RBrac))
5193 return MatchOperand_ParseFail;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005194 Parser.Lex();
Sam Koltona74cd522016-03-18 15:35:51 +00005195 } else {
5196 // sel:%d
5197 Parser.Lex();
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005198 if (getParser().parseAbsoluteExpression(Int))
Sam Koltona74cd522016-03-18 15:35:51 +00005199 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00005200
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005201 if (Prefix == "row_shl" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005202 Int |= DppCtrl::ROW_SHL0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005203 } else if (Prefix == "row_shr" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005204 Int |= DppCtrl::ROW_SHR0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005205 } else if (Prefix == "row_ror" && 1 <= Int && Int <= 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005206 Int |= DppCtrl::ROW_ROR0;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005207 } else if (Prefix == "wave_shl" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005208 Int = DppCtrl::WAVE_SHL1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005209 } else if (Prefix == "wave_rol" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005210 Int = DppCtrl::WAVE_ROL1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005211 } else if (Prefix == "wave_shr" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005212 Int = DppCtrl::WAVE_SHR1;
Artem Tamazov2146a0a2016-09-22 11:47:21 +00005213 } else if (Prefix == "wave_ror" && 1 == Int) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005214 Int = DppCtrl::WAVE_ROR1;
Sam Koltona74cd522016-03-18 15:35:51 +00005215 } else if (Prefix == "row_bcast") {
5216 if (Int == 15) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005217 Int = DppCtrl::BCAST15;
Sam Koltona74cd522016-03-18 15:35:51 +00005218 } else if (Int == 31) {
Stanislav Mekhanoshin43293612018-05-08 16:53:02 +00005219 Int = DppCtrl::BCAST31;
Sam Kolton7a2a3232016-07-14 14:50:35 +00005220 } else {
5221 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00005222 }
5223 } else {
Sam Kolton201398e2016-04-21 13:14:24 +00005224 return MatchOperand_ParseFail;
Sam Koltona74cd522016-03-18 15:35:51 +00005225 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005226 }
Sam Koltondfa29f72016-03-09 12:29:31 +00005227 }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTyDppCtrl));
  return MatchOperand_Success;
}
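
// Plausible examples of the DPP control syntax accepted above (illustrative
// sketches that follow the prefix checks in the code, not lines taken from
// the test suite):
//   v_mov_b32 v0, v1 quad_perm:[3,2,1,0] row_mask:0xf bank_mask:0xf
//   v_mov_b32 v0, v1 row_shl:1
//   v_mov_b32 v0, v1 row_bcast:15 bound_ctrl:0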

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultRowMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppRowMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBankMask() const {
  return AMDGPUOperand::CreateImm(this, 0xf, SMLoc(), AMDGPUOperand::ImmTyDppBankMask);
}

AMDGPUOperand::Ptr AMDGPUAsmParser::defaultBoundCtrl() const {
  return AMDGPUOperand::CreateImm(this, 0, SMLoc(), AMDGPUOperand::ImmTyDppBoundCtrl);
}
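
// When the user omits these operands, row_mask and bank_mask default to 0xf
// (all rows and banks enabled) and bound_ctrl defaults to 0.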

void AMDGPUAsmParser::cvtDPP(MCInst &Inst, const OperandVector &Operands) {
  OptionalImmIndexMap OptionalIdx;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  // All DPP instructions with at least one source operand have a fake "old"
  // source at the beginning that's tied to the dst operand. Handle it here.
  if (Desc.getNumOperands() >= 2)
    Inst.addOperand(Inst.getOperand(0));

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    // Add the register arguments
    if (Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b DPP instructions (v_add_u32, v_sub_u32, ...) use the "vcc"
      // token. Skip it.
      continue;
    } else if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegWithFPInputModsOperands(Inst, 2);
    } else if (Op.isDPPCtrl()) {
      Op.addImmOperands(Inst, 1);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
  }

  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppRowMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBankMask, 0xf);
  addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyDppBoundCtrl);
}
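
// After conversion, a typical VOP1 DPP MCInst is laid out roughly as:
//   vdst, old (tied copy of vdst), src0_modifiers, src0,
//   dpp_ctrl, row_mask, bank_mask, bound_ctrl
// (a sketch of the common case, not an exhaustive description).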

//===----------------------------------------------------------------------===//
// sdwa
//===----------------------------------------------------------------------===//

OperandMatchResultTy
AMDGPUAsmParser::parseSDWASel(OperandVector &Operands, StringRef Prefix,
                              AMDGPUOperand::ImmTy Type) {
  using namespace llvm::AMDGPU::SDWA;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Value;
  OperandMatchResultTy Res = parseStringWithPrefix(Prefix, Value);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  int64_t Int = StringSwitch<int64_t>(Value)
                    .Case("BYTE_0", SdwaSel::BYTE_0)
                    .Case("BYTE_1", SdwaSel::BYTE_1)
                    .Case("BYTE_2", SdwaSel::BYTE_2)
                    .Case("BYTE_3", SdwaSel::BYTE_3)
                    .Case("WORD_0", SdwaSel::WORD_0)
                    .Case("WORD_1", SdwaSel::WORD_1)
                    .Case("DWORD", SdwaSel::DWORD)
                    .Default(0xffffffff);
  Parser.Lex(); // eat last token

  if (Int == 0xffffffff) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, Type));
  return MatchOperand_Success;
}
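
// A plausible example of the SDWA select syntax handled above (illustrative,
// not taken verbatim from the test suite):
//   v_mov_b32_sdwa v1, v2 dst_sel:BYTE_0 dst_unused:UNUSED_PRESERVE src0_sel:WORD_1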

OperandMatchResultTy
AMDGPUAsmParser::parseSDWADstUnused(OperandVector &Operands) {
  using namespace llvm::AMDGPU::SDWA;

  SMLoc S = Parser.getTok().getLoc();
  StringRef Value;
  OperandMatchResultTy Res = parseStringWithPrefix("dst_unused", Value);
  if (Res != MatchOperand_Success) {
    return Res;
  }

  int64_t Int = StringSwitch<int64_t>(Value)
                    .Case("UNUSED_PAD", DstUnused::UNUSED_PAD)
                    .Case("UNUSED_SEXT", DstUnused::UNUSED_SEXT)
                    .Case("UNUSED_PRESERVE", DstUnused::UNUSED_PRESERVE)
                    .Default(0xffffffff);
  Parser.Lex(); // eat last token

  if (Int == 0xffffffff) {
    return MatchOperand_ParseFail;
  }

  Operands.push_back(AMDGPUOperand::CreateImm(this, Int, S, AMDGPUOperand::ImmTySdwaDstUnused));
  return MatchOperand_Success;
}
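
// The dst_unused modes control the destination bits that dst_sel does not
// cover: UNUSED_PAD zero-fills them, UNUSED_SEXT sign-extends the selected
// field, and UNUSED_PRESERVE keeps the previous register contents (an
// informal summary of the ISA semantics, not a quote from the manual).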

void AMDGPUAsmParser::cvtSdwaVOP1(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP1);
}

void AMDGPUAsmParser::cvtSdwaVOP2(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2);
}

void AMDGPUAsmParser::cvtSdwaVOP2b(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOP2, true);
}

void AMDGPUAsmParser::cvtSdwaVOPC(MCInst &Inst, const OperandVector &Operands) {
  cvtSDWA(Inst, Operands, SIInstrFlags::VOPC, isVI());
}
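
// The wrappers above select the basic instruction class and whether a leading
// "vcc" operand should be skipped: always for VOP2b, and for VOPC only on VI,
// presumably because the VI SDWA encoding has no explicit sdst field.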

void AMDGPUAsmParser::cvtSDWA(MCInst &Inst, const OperandVector &Operands,
                              uint64_t BasicInstType, bool SkipVcc) {
  using namespace llvm::AMDGPU::SDWA;

  OptionalImmIndexMap OptionalIdx;
  bool SkippedVcc = false;

  unsigned I = 1;
  const MCInstrDesc &Desc = MII.get(Inst.getOpcode());
  for (unsigned J = 0; J < Desc.getNumDefs(); ++J) {
    ((AMDGPUOperand &)*Operands[I++]).addRegOperands(Inst, 1);
  }

  for (unsigned E = Operands.size(); I != E; ++I) {
    AMDGPUOperand &Op = ((AMDGPUOperand &)*Operands[I]);
    if (SkipVcc && !SkippedVcc && Op.isReg() && Op.Reg.RegNo == AMDGPU::VCC) {
      // VOP2b SDWA instructions (v_add_u32, v_sub_u32, ...) use the "vcc"
      // token as dst. Skip it if it is the 2nd operand
      // (e.g. v_add_i32_sdwa v1, vcc, v2, v3) or the 4th operand
      // (v_addc_u32_sdwa v1, vcc, v2, v3, vcc), and only if we did not
      // already skip it on the previous iteration.
      if (BasicInstType == SIInstrFlags::VOP2 &&
          (Inst.getNumOperands() == 1 || Inst.getNumOperands() == 5)) {
        SkippedVcc = true;
        continue;
      } else if (BasicInstType == SIInstrFlags::VOPC &&
                 Inst.getNumOperands() == 0) {
        SkippedVcc = true;
        continue;
      }
    }
    if (isRegOrImmWithInputMods(Desc, Inst.getNumOperands())) {
      Op.addRegOrImmWithInputModsOperands(Inst, 2);
    } else if (Op.isImm()) {
      // Handle optional arguments
      OptionalIdx[Op.getImmTy()] = I;
    } else {
      llvm_unreachable("Invalid operand type");
    }
    SkippedVcc = false;
  }
5406
Sam Koltonf7659d712017-05-23 10:08:55 +00005407 if (Inst.getOpcode() != AMDGPU::V_NOP_sdwa_gfx9 &&
5408 Inst.getOpcode() != AMDGPU::V_NOP_sdwa_vi) {
Sam Kolton549c89d2017-06-21 08:53:38 +00005409 // v_nop_sdwa_sdwa_vi/gfx9 has no optional sdwa arguments
Sam Koltona3ec5c12016-10-07 14:46:06 +00005410 switch (BasicInstType) {
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005411 case SIInstrFlags::VOP1:
Sam Koltonf7659d712017-05-23 10:08:55 +00005412 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton549c89d2017-06-21 08:53:38 +00005413 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
Sam Koltonf7659d712017-05-23 10:08:55 +00005414 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
5415 }
Sam Kolton9dffada2017-01-17 15:26:02 +00005416 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
5417 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
5418 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00005419 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005420
5421 case SIInstrFlags::VOP2:
Sam Koltonf7659d712017-05-23 10:08:55 +00005422 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
Sam Kolton549c89d2017-06-21 08:53:38 +00005423 if (AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::omod) != -1) {
Sam Koltonf7659d712017-05-23 10:08:55 +00005424 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyOModSI, 0);
5425 }
Sam Kolton9dffada2017-01-17 15:26:02 +00005426 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstSel, SdwaSel::DWORD);
5427 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaDstUnused, DstUnused::UNUSED_PRESERVE);
5428 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
5429 addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
Sam Koltona3ec5c12016-10-07 14:46:06 +00005430 break;
Eugene Zelenko2bc2f332016-12-09 22:06:55 +00005431
5432 case SIInstrFlags::VOPC:
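      // SDWA VOPC writes its result to vcc, so there is no dst_sel or
      // dst_unused operand to add here.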
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTyClampSI, 0);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc0Sel, SdwaSel::DWORD);
      addOptionalImmOperand(Inst, Operands, OptionalIdx, AMDGPUOperand::ImmTySdwaSrc1Sel, SdwaSel::DWORD);
      break;

    default:
      llvm_unreachable("Invalid instruction type. Only VOP1, VOP2 and VOPC allowed");
    }
  }

  // Special case for v_mac_{f16, f32}: they have a src2 register operand
  // that is tied to the dst operand.
  if (Inst.getOpcode() == AMDGPU::V_MAC_F32_sdwa_vi ||
      Inst.getOpcode() == AMDGPU::V_MAC_F16_sdwa_vi) {
    auto it = Inst.begin();
    std::advance(
        it, AMDGPU::getNamedOperandIdx(Inst.getOpcode(), AMDGPU::OpName::src2));
    Inst.insert(it, Inst.getOperand(0)); // src2 = dst
  }
}

/// Force static initialization.
extern "C" void LLVMInitializeAMDGPUAsmParser() {
  RegisterMCAsmParser<AMDGPUAsmParser> A(getTheAMDGPUTarget());
  RegisterMCAsmParser<AMDGPUAsmParser> B(getTheGCNTarget());
}
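
// Both triples served by the backend share this parser: getTheAMDGPUTarget()
// corresponds to r600 and getTheGCNTarget() to amdgcn.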

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "AMDGPUGenAsmMatcher.inc"

// This function should be defined after the auto-generated include so that
// the MatchClassKind enum is available.
unsigned AMDGPUAsmParser::validateTargetOperandClass(MCParsedAsmOperand &Op,
                                                     unsigned Kind) {
  // Tokens like "glc" would be parsed as immediate operands in
  // ParseOperand(), but MatchInstructionImpl() expects to see a token there
  // and fails to validate the operand. This method checks whether we were
  // given an immediate operand where the matcher expects the corresponding
  // token.
  AMDGPUOperand &Operand = (AMDGPUOperand&)Op;
  switch (Kind) {
  case MCK_addr64:
    return Operand.isAddr64() ? Match_Success : Match_InvalidOperand;
  case MCK_gds:
    return Operand.isGDS() ? Match_Success : Match_InvalidOperand;
  case MCK_lds:
    return Operand.isLDS() ? Match_Success : Match_InvalidOperand;
  case MCK_glc:
    return Operand.isGLC() ? Match_Success : Match_InvalidOperand;
  case MCK_idxen:
    return Operand.isIdxen() ? Match_Success : Match_InvalidOperand;
  case MCK_offen:
    return Operand.isOffen() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcB32:
    // When operands have expression values, they will return true for isToken,
    // because it is not possible to distinguish between a token and an
    // expression at parse time. MatchInstructionImpl() will always try to
    // match an operand as a token, when isToken returns true, and when the
    // name of the expression is not a valid token, the match will fail,
    // so we need to handle it here.
    return Operand.isSSrcB32() ? Match_Success : Match_InvalidOperand;
  case MCK_SSrcF32:
    return Operand.isSSrcF32() ? Match_Success : Match_InvalidOperand;
  case MCK_SoppBrTarget:
    return Operand.isSoppBrTarget() ? Match_Success : Match_InvalidOperand;
  case MCK_VReg32OrOff:
    return Operand.isVReg32OrOff() ? Match_Success : Match_InvalidOperand;
  case MCK_InterpSlot:
    return Operand.isInterpSlot() ? Match_Success : Match_InvalidOperand;
  case MCK_Attr:
    return Operand.isInterpAttr() ? Match_Success : Match_InvalidOperand;
  case MCK_AttrChan:
    return Operand.isAttrChan() ? Match_Success : Match_InvalidOperand;
  default:
    return Match_InvalidOperand;
  }
}